bfa_svc.c 113 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959
  1. /*
  2. * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  3. * All rights reserved
  4. * www.brocade.com
  5. *
  6. * Linux driver for Brocade Fibre Channel Host Bus Adapter.
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License (GPL) Version 2 as
  10. * published by the Free Software Foundation
  11. *
  12. * This program is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. */
  17. #include "bfad_drv.h"
  18. #include "bfa_plog.h"
  19. #include "bfa_cs.h"
  20. #include "bfa_modules.h"
  21. BFA_TRC_FILE(HAL, FCXP);
  22. BFA_MODULE(fcxp);
  23. BFA_MODULE(sgpg);
  24. BFA_MODULE(lps);
  25. BFA_MODULE(fcport);
  26. BFA_MODULE(rport);
  27. BFA_MODULE(uf);
  28. /*
  29. * LPS related definitions
  30. */
  31. #define BFA_LPS_MIN_LPORTS (1)
  32. #define BFA_LPS_MAX_LPORTS (256)
  33. /*
  34. * Maximum Vports supported per physical port or vf.
  35. */
  36. #define BFA_LPS_MAX_VPORTS_SUPP_CB 255
  37. #define BFA_LPS_MAX_VPORTS_SUPP_CT 190
  38. /*
  39. * FC PORT related definitions
  40. */
  41. /*
  42. * The port is considered disabled if corresponding physical port or IOC are
  43. * disabled explicitly
  44. */
  45. #define BFA_PORT_IS_DISABLED(bfa) \
  46. ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
  47. (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
  48. /*
  49. * BFA port state machine events
  50. */
  51. enum bfa_fcport_sm_event {
  52. BFA_FCPORT_SM_START = 1, /* start port state machine */
  53. BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
  54. BFA_FCPORT_SM_ENABLE = 3, /* enable port */
  55. BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
  56. BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
  57. BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
  58. BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */
  59. BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
  60. BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
  61. };
  62. /*
  63. * BFA port link notification state machine events
  64. */
  65. enum bfa_fcport_ln_sm_event {
  66. BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
  67. BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
  68. BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
  69. };
  70. /*
  71. * RPORT related definitions
  72. */
  73. #define bfa_rport_offline_cb(__rp) do { \
  74. if ((__rp)->bfa->fcs) \
  75. bfa_cb_rport_offline((__rp)->rport_drv); \
  76. else { \
  77. bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
  78. __bfa_cb_rport_offline, (__rp)); \
  79. } \
  80. } while (0)
  81. #define bfa_rport_online_cb(__rp) do { \
  82. if ((__rp)->bfa->fcs) \
  83. bfa_cb_rport_online((__rp)->rport_drv); \
  84. else { \
  85. bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
  86. __bfa_cb_rport_online, (__rp)); \
  87. } \
  88. } while (0)
  89. /*
  90. * forward declarations FCXP related functions
  91. */
  92. static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
  93. static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
  94. struct bfi_fcxp_send_rsp_s *fcxp_rsp);
  95. static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
  96. struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
  97. static void bfa_fcxp_qresume(void *cbarg);
  98. static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
  99. struct bfi_fcxp_send_req_s *send_req);
  100. /*
  101. * forward declarations for LPS functions
  102. */
  103. static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
  104. u32 *dm_len);
  105. static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
  106. struct bfa_iocfc_cfg_s *cfg,
  107. struct bfa_meminfo_s *meminfo,
  108. struct bfa_pcidev_s *pcidev);
  109. static void bfa_lps_detach(struct bfa_s *bfa);
  110. static void bfa_lps_start(struct bfa_s *bfa);
  111. static void bfa_lps_stop(struct bfa_s *bfa);
  112. static void bfa_lps_iocdisable(struct bfa_s *bfa);
  113. static void bfa_lps_login_rsp(struct bfa_s *bfa,
  114. struct bfi_lps_login_rsp_s *rsp);
  115. static void bfa_lps_logout_rsp(struct bfa_s *bfa,
  116. struct bfi_lps_logout_rsp_s *rsp);
  117. static void bfa_lps_reqq_resume(void *lps_arg);
  118. static void bfa_lps_free(struct bfa_lps_s *lps);
  119. static void bfa_lps_send_login(struct bfa_lps_s *lps);
  120. static void bfa_lps_send_logout(struct bfa_lps_s *lps);
  121. static void bfa_lps_login_comp(struct bfa_lps_s *lps);
  122. static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
  123. static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
  124. /*
  125. * forward declaration for LPS state machine
  126. */
  127. static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
  128. static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
  129. static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
  130. event);
  131. static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
  132. static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
  133. static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
  134. event);
  135. /*
  136. * forward declaration for FC Port functions
  137. */
  138. static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
  139. static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
  140. static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
  141. static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
  142. static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
  143. static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
  144. static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
  145. enum bfa_port_linkstate event, bfa_boolean_t trunk);
  146. static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
  147. enum bfa_port_linkstate event);
  148. static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
  149. static void bfa_fcport_stats_get_timeout(void *cbarg);
  150. static void bfa_fcport_stats_clr_timeout(void *cbarg);
  151. static void bfa_trunk_iocdisable(struct bfa_s *bfa);
  152. /*
  153. * forward declaration for FC PORT state machine
  154. */
  155. static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
  156. enum bfa_fcport_sm_event event);
  157. static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
  158. enum bfa_fcport_sm_event event);
  159. static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
  160. enum bfa_fcport_sm_event event);
  161. static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
  162. enum bfa_fcport_sm_event event);
  163. static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
  164. enum bfa_fcport_sm_event event);
  165. static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
  166. enum bfa_fcport_sm_event event);
  167. static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
  168. enum bfa_fcport_sm_event event);
  169. static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
  170. enum bfa_fcport_sm_event event);
  171. static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
  172. enum bfa_fcport_sm_event event);
  173. static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
  174. enum bfa_fcport_sm_event event);
  175. static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
  176. enum bfa_fcport_sm_event event);
  177. static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
  178. enum bfa_fcport_sm_event event);
  179. static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
  180. enum bfa_fcport_ln_sm_event event);
  181. static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
  182. enum bfa_fcport_ln_sm_event event);
  183. static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
  184. enum bfa_fcport_ln_sm_event event);
  185. static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
  186. enum bfa_fcport_ln_sm_event event);
  187. static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
  188. enum bfa_fcport_ln_sm_event event);
  189. static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
  190. enum bfa_fcport_ln_sm_event event);
  191. static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
  192. enum bfa_fcport_ln_sm_event event);
  193. static struct bfa_sm_table_s hal_port_sm_table[] = {
  194. {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
  195. {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
  196. {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
  197. {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
  198. {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
  199. {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
  200. {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
  201. {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
  202. {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
  203. {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
  204. {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
  205. {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
  206. };
  207. /*
  208. * forward declaration for RPORT related functions
  209. */
  210. static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
  211. static void bfa_rport_free(struct bfa_rport_s *rport);
  212. static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
  213. static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
  214. static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
  215. static void __bfa_cb_rport_online(void *cbarg,
  216. bfa_boolean_t complete);
  217. static void __bfa_cb_rport_offline(void *cbarg,
  218. bfa_boolean_t complete);
  219. /*
  220. * forward declaration for RPORT state machine
  221. */
  222. static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
  223. enum bfa_rport_event event);
  224. static void bfa_rport_sm_created(struct bfa_rport_s *rp,
  225. enum bfa_rport_event event);
  226. static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
  227. enum bfa_rport_event event);
  228. static void bfa_rport_sm_online(struct bfa_rport_s *rp,
  229. enum bfa_rport_event event);
  230. static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
  231. enum bfa_rport_event event);
  232. static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
  233. enum bfa_rport_event event);
  234. static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
  235. enum bfa_rport_event event);
  236. static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
  237. enum bfa_rport_event event);
  238. static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
  239. enum bfa_rport_event event);
  240. static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
  241. enum bfa_rport_event event);
  242. static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
  243. enum bfa_rport_event event);
  244. static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
  245. enum bfa_rport_event event);
  246. static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
  247. enum bfa_rport_event event);
  248. /*
  249. * PLOG related definitions
  250. */
  251. static int
  252. plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
  253. {
  254. if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
  255. (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
  256. return 1;
  257. if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
  258. (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
  259. return 1;
  260. return 0;
  261. }
  262. static u64
  263. bfa_get_log_time(void)
  264. {
  265. u64 system_time = 0;
  266. struct timeval tv;
  267. do_gettimeofday(&tv);
  268. /* We are interested in seconds only. */
  269. system_time = tv.tv_sec;
  270. return system_time;
  271. }
  272. static void
  273. bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
  274. {
  275. u16 tail;
  276. struct bfa_plog_rec_s *pl_recp;
  277. if (plog->plog_enabled == 0)
  278. return;
  279. if (plkd_validate_logrec(pl_rec)) {
  280. bfa_assert(0);
  281. return;
  282. }
  283. tail = plog->tail;
  284. pl_recp = &(plog->plog_recs[tail]);
  285. memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
  286. pl_recp->tv = bfa_get_log_time();
  287. BFA_PL_LOG_REC_INCR(plog->tail);
  288. if (plog->head == plog->tail)
  289. BFA_PL_LOG_REC_INCR(plog->head);
  290. }
  291. void
  292. bfa_plog_init(struct bfa_plog_s *plog)
  293. {
  294. memset((char *)plog, 0, sizeof(struct bfa_plog_s));
  295. memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
  296. plog->head = plog->tail = 0;
  297. plog->plog_enabled = 1;
  298. }
  299. void
  300. bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
  301. enum bfa_plog_eid event,
  302. u16 misc, char *log_str)
  303. {
  304. struct bfa_plog_rec_s lp;
  305. if (plog->plog_enabled) {
  306. memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
  307. lp.mid = mid;
  308. lp.eid = event;
  309. lp.log_type = BFA_PL_LOG_TYPE_STRING;
  310. lp.misc = misc;
  311. strncpy(lp.log_entry.string_log, log_str,
  312. BFA_PL_STRING_LOG_SZ - 1);
  313. lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
  314. bfa_plog_add(plog, &lp);
  315. }
  316. }
  317. void
  318. bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
  319. enum bfa_plog_eid event,
  320. u16 misc, u32 *intarr, u32 num_ints)
  321. {
  322. struct bfa_plog_rec_s lp;
  323. u32 i;
  324. if (num_ints > BFA_PL_INT_LOG_SZ)
  325. num_ints = BFA_PL_INT_LOG_SZ;
  326. if (plog->plog_enabled) {
  327. memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
  328. lp.mid = mid;
  329. lp.eid = event;
  330. lp.log_type = BFA_PL_LOG_TYPE_INT;
  331. lp.misc = misc;
  332. for (i = 0; i < num_ints; i++)
  333. lp.log_entry.int_log[i] = intarr[i];
  334. lp.log_num_ints = (u8) num_ints;
  335. bfa_plog_add(plog, &lp);
  336. }
  337. }
  338. void
  339. bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
  340. enum bfa_plog_eid event,
  341. u16 misc, struct fchs_s *fchdr)
  342. {
  343. struct bfa_plog_rec_s lp;
  344. u32 *tmp_int = (u32 *) fchdr;
  345. u32 ints[BFA_PL_INT_LOG_SZ];
  346. if (plog->plog_enabled) {
  347. memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
  348. ints[0] = tmp_int[0];
  349. ints[1] = tmp_int[1];
  350. ints[2] = tmp_int[4];
  351. bfa_plog_intarr(plog, mid, event, misc, ints, 3);
  352. }
  353. }
  354. void
  355. bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
  356. enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
  357. u32 pld_w0)
  358. {
  359. struct bfa_plog_rec_s lp;
  360. u32 *tmp_int = (u32 *) fchdr;
  361. u32 ints[BFA_PL_INT_LOG_SZ];
  362. if (plog->plog_enabled) {
  363. memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
  364. ints[0] = tmp_int[0];
  365. ints[1] = tmp_int[1];
  366. ints[2] = tmp_int[4];
  367. ints[3] = pld_w0;
  368. bfa_plog_intarr(plog, mid, event, misc, ints, 4);
  369. }
  370. }
  371. /*
  372. * fcxp_pvt BFA FCXP private functions
  373. */
/*
 * Carve the request and response payload pools for all fcxps out of
 * the DMA-able memory described by 'mi', and record the kva/pa base
 * of each pool in the module structure.
 */
static void
claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
{
	u8 *dm_kva = NULL;
	u64 dm_pa;
	u32 buf_pool_sz;

	dm_kva = bfa_meminfo_dma_virt(mi);
	dm_pa = bfa_meminfo_dma_phys(mi);

	buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;

	/*
	 * Initialize the fcxp req payload list
	 */
	mod->req_pld_list_kva = dm_kva;
	mod->req_pld_list_pa = dm_pa;
	dm_kva += buf_pool_sz;
	dm_pa += buf_pool_sz;
	memset(mod->req_pld_list_kva, 0, buf_pool_sz);

	/*
	 * Initialize the fcxp rsp payload list
	 */
	buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
	mod->rsp_pld_list_kva = dm_kva;
	mod->rsp_pld_list_pa = dm_pa;
	dm_kva += buf_pool_sz;
	dm_pa += buf_pool_sz;
	memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);

	/* Advance the meminfo cursors past the claimed pools. */
	bfa_meminfo_dma_virt(mi) = dm_kva;
	bfa_meminfo_dma_phys(mi) = dm_pa;
}
  403. static void
  404. claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
  405. {
  406. u16 i;
  407. struct bfa_fcxp_s *fcxp;
  408. fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
  409. memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
  410. INIT_LIST_HEAD(&mod->fcxp_free_q);
  411. INIT_LIST_HEAD(&mod->fcxp_active_q);
  412. mod->fcxp_list = fcxp;
  413. for (i = 0; i < mod->num_fcxps; i++) {
  414. fcxp->fcxp_mod = mod;
  415. fcxp->fcxp_tag = i;
  416. list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
  417. bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
  418. fcxp->reqq_waiting = BFA_FALSE;
  419. fcxp = fcxp + 1;
  420. }
  421. bfa_meminfo_kva(mi) = (void *)fcxp;
  422. }
/*
 * Report memory requirements for the fcxp module: DMA memory for
 * request/response payload pools and non-DMA memory for the fcxp
 * structures.
 */
static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
		u32 *dm_len)
{
	u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;

	if (num_fcxp_reqs == 0)
		return;

	/*
	 * Account for req/rsp payload
	 */
	/* Requests always use the small (IBUF) payload size. */
	*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
	/* Responses are small in min-cfg, large otherwise. */
	if (cfg->drvcfg.min_cfg)
		*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
	else
		*dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;

	/*
	 * Account for fcxp structs
	 */
	*ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
}
  443. static void
  444. bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
  445. struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
  446. {
  447. struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
  448. memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
  449. mod->bfa = bfa;
  450. mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
  451. /*
  452. * Initialize FCXP request and response payload sizes.
  453. */
  454. mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
  455. if (!cfg->drvcfg.min_cfg)
  456. mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
  457. INIT_LIST_HEAD(&mod->wait_q);
  458. claim_fcxp_req_rsp_mem(mod, meminfo);
  459. claim_fcxps_mem(mod, meminfo);
  460. }
/*
 * Module detach entry -- nothing to undo for the fcxp module.
 */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
/*
 * Module start entry -- no per-start work for the fcxp module.
 */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
/*
 * Module stop entry -- no per-stop work for the fcxp module.
 */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
/*
 * IOC went down: fail every outstanding fcxp with IOC_FAILURE.
 */
static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	struct list_head *qe, *qen;

	/* _safe walk: completion may unlink the current element. */
	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
		fcxp = (struct bfa_fcxp_s *) qe;
		if (fcxp->caller == NULL) {
			/* No caller context: complete inline and free. */
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
			bfa_fcxp_free(fcxp);
		} else {
			/* Defer completion through the callback queue. */
			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	}
}
  492. static struct bfa_fcxp_s *
  493. bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
  494. {
  495. struct bfa_fcxp_s *fcxp;
  496. bfa_q_deq(&fm->fcxp_free_q, &fcxp);
  497. if (fcxp)
  498. list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
  499. return fcxp;
  500. }
  501. static void
  502. bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
  503. struct bfa_s *bfa,
  504. u8 *use_ibuf,
  505. u32 *nr_sgles,
  506. bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
  507. bfa_fcxp_get_sglen_t *r_sglen_cbfn,
  508. struct list_head *r_sgpg_q,
  509. int n_sgles,
  510. bfa_fcxp_get_sgaddr_t sga_cbfn,
  511. bfa_fcxp_get_sglen_t sglen_cbfn)
  512. {
  513. bfa_assert(bfa != NULL);
  514. bfa_trc(bfa, fcxp->fcxp_tag);
  515. if (n_sgles == 0) {
  516. *use_ibuf = 1;
  517. } else {
  518. bfa_assert(*sga_cbfn != NULL);
  519. bfa_assert(*sglen_cbfn != NULL);
  520. *use_ibuf = 0;
  521. *r_sga_cbfn = sga_cbfn;
  522. *r_sglen_cbfn = sglen_cbfn;
  523. *nr_sgles = n_sgles;
  524. /*
  525. * alloc required sgpgs
  526. */
  527. if (n_sgles > BFI_SGE_INLINE)
  528. bfa_assert(0);
  529. }
  530. }
/*
 * Initialize an fcxp for a caller: record the owner and set up both
 * the request-side and response-side SG information.
 */
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	void *caller, struct bfa_s *bfa, int nreq_sgles,
	int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	bfa_fcxp_get_sglen_t req_sglen_cbfn,
	bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
	bfa_assert(bfa != NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	/* Request-side SG setup. */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	/* Response-side SG setup. */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
}
/*
 * Return an fcxp to the module.  If an allocation waiter is queued,
 * hand the fcxp straight to it instead of releasing it to the free
 * list.
 */
static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	struct bfa_fcxp_wqe_s *wqe;

	/* First-come first-served handoff to a waiting allocator. */
	bfa_q_deq(&mod->wait_q, &wqe);

	if (wqe) {
		bfa_trc(mod->bfa, fcxp->fcxp_tag);

		/* Re-initialize with the waiter's saved parameters. */
		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
			wqe->nrsp_sgles, wqe->req_sga_cbfn,
			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
			wqe->rsp_sglen_cbfn);
		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
		return;
	}

	/* No waiter: move the fcxp from the active to the free list. */
	bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
	list_del(&fcxp->qe);
	list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
}
/*
 * No-op completion used for fcxps whose real callback has been
 * discarded (see bfa_fcxp_discard()).
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
  577. static void
  578. __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
  579. {
  580. struct bfa_fcxp_s *fcxp = cbarg;
  581. if (complete) {
  582. fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
  583. fcxp->rsp_status, fcxp->rsp_len,
  584. fcxp->residue_len, &fcxp->rsp_fchs);
  585. } else {
  586. bfa_fcxp_free(fcxp);
  587. }
  588. }
/*
 * Firmware SEND_RSP handler: locate the fcxp by tag and complete it
 * inline or via the deferred callback queue.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	u16 fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	/* Convert firmware fields in place before they are consumed. */
	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 * is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	bfa_assert(fcxp->send_cbfn != NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			/* No caller context: complete inline. */
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/* Save the response; defer via callback queue. */
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}
  630. static void
  631. hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
  632. {
  633. union bfi_addr_u sga_zero = { {0} };
  634. sge->sg_len = reqlen;
  635. sge->flags = BFI_SGE_DATA_LAST;
  636. bfa_dma_addr_set(sge[0].sga, req_pa);
  637. bfa_sge_to_be(sge);
  638. sge++;
  639. sge->sga = sga_zero;
  640. sge->sg_len = reqlen;
  641. sge->flags = BFI_SGE_PGDLEN;
  642. bfa_sge_to_be(sge);
  643. }
  644. static void
  645. hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
  646. struct fchs_s *fchs)
  647. {
  648. /*
  649. * TODO: TX ox_id
  650. */
  651. if (reqlen > 0) {
  652. if (fcxp->use_ireqbuf) {
  653. u32 pld_w0 =
  654. *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
  655. bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
  656. BFA_PL_EID_TX,
  657. reqlen + sizeof(struct fchs_s), fchs,
  658. pld_w0);
  659. } else {
  660. bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
  661. BFA_PL_EID_TX,
  662. reqlen + sizeof(struct fchs_s),
  663. fchs);
  664. }
  665. } else {
  666. bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
  667. reqlen + sizeof(struct fchs_s), fchs);
  668. }
  669. }
  670. static void
  671. hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
  672. struct bfi_fcxp_send_rsp_s *fcxp_rsp)
  673. {
  674. if (fcxp_rsp->rsp_len > 0) {
  675. if (fcxp->use_irspbuf) {
  676. u32 pld_w0 =
  677. *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
  678. bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
  679. BFA_PL_EID_RX,
  680. (u16) fcxp_rsp->rsp_len,
  681. &fcxp_rsp->fchs, pld_w0);
  682. } else {
  683. bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
  684. BFA_PL_EID_RX,
  685. (u16) fcxp_rsp->rsp_len,
  686. &fcxp_rsp->fchs);
  687. }
  688. } else {
  689. bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
  690. (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
  691. }
  692. }
/*
 * Handler to resume sending fcxp when space is available in cpe queue.
 */
static void
bfa_fcxp_qresume(void *cbarg)
{
	struct bfa_fcxp_s *fcxp = cbarg;
	struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
	struct bfi_fcxp_send_req_s *send_req;

	fcxp->reqq_waiting = BFA_FALSE;
	/*
	 * NOTE(review): the return of bfa_reqq_next() is not checked --
	 * presumably a queue slot is guaranteed when this resume
	 * callback fires; confirm against bfa_reqq semantics.
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	bfa_fcxp_queue(fcxp, send_req);
}
/*
 * Queue fcxp send request to firmware.
 */
static void
bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
{
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfa_rport_s		*rport = reqi->bfa_rport;

	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
		    bfa_lpuid(bfa));

	send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
	if (rport) {
		send_req->rport_fw_hndl = rport->fw_handle;
		send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
		/* Fall back to the FC maximum if the rport has no size. */
		if (send_req->max_frmsz == 0)
			send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	} else {
		/* WKA rports have no firmware handle. */
		send_req->rport_fw_hndl = 0;
		send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	}

	send_req->vf_id = cpu_to_be16(reqi->vf_id);
	send_req->lp_tag = reqi->lp_tag;
	send_req->class = reqi->class;
	send_req->rsp_timeout = rspi->rsp_timeout;
	send_req->cts = reqi->cts;
	send_req->fchs = reqi->fchs;

	send_req->req_len = cpu_to_be32(reqi->req_tot_len);
	send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);

	/*
	 * setup req sgles
	 */
	if (fcxp->use_ireqbuf == 1) {
		/* Internal buffer: one local SGE pair. */
		hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
					BFA_FCXP_REQ_PLD_PA(fcxp));
	} else {
		if (fcxp->nreq_sgles > 0) {
			/* Only a single caller SG element is supported. */
			bfa_assert(fcxp->nreq_sgles == 1);
			hal_fcxp_set_local_sges(send_req->req_sge,
						reqi->req_tot_len,
						fcxp->req_sga_cbfn(fcxp->caller,
								   0));
		} else {
			bfa_assert(reqi->req_tot_len == 0);
			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
		}
	}

	/*
	 * setup rsp sgles
	 */
	if (fcxp->use_irspbuf == 1) {
		bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);

		hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
					BFA_FCXP_RSP_PLD_PA(fcxp));

	} else {
		if (fcxp->nrsp_sgles > 0) {
			/* Only a single caller SG element is supported. */
			bfa_assert(fcxp->nrsp_sgles == 1);
			hal_fcxp_set_local_sges(send_req->rsp_sge,
						rspi->rsp_maxlen,
						fcxp->rsp_sga_cbfn(fcxp->caller,
								   0));
		} else {
			bfa_assert(rspi->rsp_maxlen == 0);
			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
		}
	}

	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);

	/* Hand the request to firmware. */
	bfa_reqq_produce(bfa, BFA_REQQ_FCXP);

	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
}
  778. /*
  779. * Allocate an FCXP instance to send a response or to send a request
  780. * that has a response. Request/response buffers are allocated by caller.
  781. *
  782. * @param[in] bfa BFA bfa instance
  783. * @param[in] nreq_sgles Number of SG elements required for request
  784. * buffer. 0, if fcxp internal buffers are used.
  785. * Use bfa_fcxp_get_reqbuf() to get the
  786. * internal req buffer.
  787. * @param[in] req_sgles SG elements describing request buffer. Will be
  788. * copied in by BFA and hence can be freed on
  789. * return from this function.
  790. * @param[in] get_req_sga function ptr to be called to get a request SG
  791. * Address (given the sge index).
  792. * @param[in] get_req_sglen function ptr to be called to get a request SG
  793. * len (given the sge index).
  794. * @param[in] get_rsp_sga function ptr to be called to get a response SG
  795. * Address (given the sge index).
  796. * @param[in] get_rsp_sglen function ptr to be called to get a response SG
  797. * len (given the sge index).
  798. *
  799. * @return FCXP instance. NULL on failure.
  800. */
  801. struct bfa_fcxp_s *
  802. bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
  803. int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
  804. bfa_fcxp_get_sglen_t req_sglen_cbfn,
  805. bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
  806. bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
  807. {
  808. struct bfa_fcxp_s *fcxp = NULL;
  809. bfa_assert(bfa != NULL);
  810. fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
  811. if (fcxp == NULL)
  812. return NULL;
  813. bfa_trc(bfa, fcxp->fcxp_tag);
  814. bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
  815. req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
  816. return fcxp;
  817. }
  818. /*
  819. * Get the internal request buffer pointer
  820. *
  821. * @param[in] fcxp BFA fcxp pointer
  822. *
  823. * @return pointer to the internal request buffer
  824. */
  825. void *
  826. bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
  827. {
  828. struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
  829. void *reqbuf;
  830. bfa_assert(fcxp->use_ireqbuf == 1);
  831. reqbuf = ((u8 *)mod->req_pld_list_kva) +
  832. fcxp->fcxp_tag * mod->req_pld_sz;
  833. return reqbuf;
  834. }
  835. u32
  836. bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
  837. {
  838. struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
  839. return mod->req_pld_sz;
  840. }
/*
 * Get the internal response buffer pointer
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return		pointer to the internal response buffer
 */
  848. void *
  849. bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
  850. {
  851. struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
  852. void *rspbuf;
  853. bfa_assert(fcxp->use_irspbuf == 1);
  854. rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
  855. fcxp->fcxp_tag * mod->rsp_pld_sz;
  856. return rspbuf;
  857. }
  858. /*
  859. * Free the BFA FCXP
  860. *
  861. * @param[in] fcxp BFA fcxp pointer
  862. *
  863. * @return void
  864. */
  865. void
  866. bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
  867. {
  868. struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
  869. bfa_assert(fcxp != NULL);
  870. bfa_trc(mod->bfa, fcxp->fcxp_tag);
  871. bfa_fcxp_put(fcxp);
  872. }
/*
 * Send a FCXP request
 *
 * @param[in]	fcxp	BFA fcxp pointer
 * @param[in]	rport	BFA rport pointer. Could be left NULL for WKA rports
 * @param[in]	vf_id	virtual Fabric ID
 * @param[in]	lp_tag	lport tag
 * @param[in]	cts	use Continuous sequence
 * @param[in]	cos	fc Class of Service
 * @param[in]	reqlen	request length, does not include FCHS length
 * @param[in]	fchs	fc Header Pointer. The header content will be copied
 *			in by BFA.
 *
 * @param[in]	cbfn	call back function to be called on receiving
 *								the response
 * @param[in]	cbarg	arg for cbfn
 * @param[in]	rsp_timeout
 *			response timeout
 *
 * @return		bfa_status_t
 */
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfi_fcxp_send_req_s	*send_req;

	bfa_trc(bfa, fcxp->fcxp_tag);

	/*
	 * setup request/response info
	 */
	reqi->bfa_rport = rport;
	reqi->vf_id = vf_id;
	reqi->lp_tag = lp_tag;
	reqi->class = cos;
	rspi->rsp_timeout = rsp_timeout;
	reqi->cts = cts;
	reqi->fchs = *fchs;
	reqi->req_tot_len = reqlen;
	rspi->rsp_maxlen = rsp_maxlen;
	/* A NULL cbfn is replaced by the discard no-op completion. */
	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
	fcxp->send_cbarg = cbarg;

	/*
	 * If no room in CPE queue, wait for space in request queue
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	if (!send_req) {
		bfa_trc(bfa, fcxp->fcxp_tag);
		fcxp->reqq_waiting = BFA_TRUE;
		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
		return;
	}

	bfa_fcxp_queue(fcxp, send_req);
}
/*
 * Abort a BFA FCXP
 *
 * NOTE(review): abort is not implemented -- this always hits
 * bfa_assert(0); the OK return is never meaningful in practice.
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return		bfa_status_t
 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	bfa_assert(0);
	return BFA_STATUS_OK;
}
/*
 * Queue a waiter for an fcxp, called when the free list is empty.
 * The parameters are saved in the wqe so that bfa_fcxp_put() can
 * initialize a freed fcxp and hand it over via alloc_cbfn.
 */
void
bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
		    bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
		    void *caller, int nreq_sgles,
		    int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
		    bfa_fcxp_get_sglen_t req_sglen_cbfn,
		    bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
		    bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	/* Waiting is only legal when no fcxp is free. */
	bfa_assert(list_empty(&mod->fcxp_free_q));

	wqe->alloc_cbfn = alloc_cbfn;
	wqe->alloc_cbarg = alloc_cbarg;
	wqe->caller = caller;
	wqe->bfa = bfa;
	wqe->nreq_sgles = nreq_sgles;
	wqe->nrsp_sgles = nrsp_sgles;
	wqe->req_sga_cbfn = req_sga_cbfn;
	wqe->req_sglen_cbfn = req_sglen_cbfn;
	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;

	list_add_tail(&wqe->qe, &mod->wait_q);
}
/*
 * Cancel a pending fcxp allocation wait queued by
 * bfa_fcxp_alloc_wait().
 */
void
bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	/* The wqe must still be parked on the wait queue. */
	bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
	list_del(&wqe->qe);
}
/*
 * Discard interest in a sent fcxp: either cancel the pending reqq
 * wait and free it, or neuter its completion callback so the
 * eventual firmware response is silently dropped.
 */
void
bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
{
	/*
	 * If waiting for room in request queue, cancel reqq wait
	 * and free fcxp.
	 */
	if (fcxp->reqq_waiting) {
		fcxp->reqq_waiting = BFA_FALSE;
		bfa_reqq_wcancel(&fcxp->reqq_wqe);
		bfa_fcxp_free(fcxp);
		return;
	}

	/* Already queued to firmware: drop the response when it comes. */
	fcxp->send_cbfn = bfa_fcxp_null_comp;
}
  990. void
  991. bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
  992. {
  993. switch (msg->mhdr.msg_id) {
  994. case BFI_FCXP_I2H_SEND_RSP:
  995. hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
  996. break;
  997. default:
  998. bfa_trc(bfa, msg->mhdr.msg_id);
  999. bfa_assert(0);
  1000. }
  1001. }
  1002. u32
  1003. bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
  1004. {
  1005. struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
  1006. return mod->rsp_pld_sz;
  1007. }
  1008. /*
  1009. * BFA LPS state machine functions
  1010. */
  1011. /*
  1012. * Init state -- no login
  1013. */
/*
 * Init state -- no login.  LOGIN either sends immediately or parks
 * on the request queue; other events are mostly no-ops here.
 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/* Wait for queue space if the reqq is full. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		/* Trace whether this is an FDISC or a FLOGI. */
		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* Not logged in: complete the logout immediately. */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		/* Nothing to do while not logged in. */
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when fabric detects loopback and discards
		 * the lps request. Fw will eventually sent out the timeout
		 * Just ignore
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
  1055. /*
  1056. * login is in progress -- awaiting response from firmware
  1057. */
/*
 * login is in progress -- awaiting response from firmware
 */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		/* Success goes online; failure falls back to init. */
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		/* Notify upper layer of the login result. */
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
  1093. /*
  1094. * login pending - awaiting space in request queue
  1095. */
/*
 * login pending - awaiting space in request queue
 */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space available: proceed with the login. */
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		break;

	case BFA_LPS_SM_OFFLINE:
		/* Going offline: cancel the reqq wait. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
  1120. /*
  1121. * login complete
  1122. */
/*
 * login complete
 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/* Wait for queue space if the reqq is full. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
  1155. /*
  1156. * logout in progress - awaiting firmware response
  1157. */
/*
 * logout in progress - awaiting firmware response
 */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		/* Logout acknowledged: notify and return to init. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
  1175. /*
  1176. * logout pending -- awaiting space in request queue
  1177. */
/*
 * logout pending -- awaiting space in request queue
 */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space available: send the logout now. */
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		/* Going offline: cancel the reqq wait. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
  1196. /*
  1197. * lps_pvt BFA LPS private functions
  1198. */
  1199. /*
  1200. * return memory requirement
  1201. */
  1202. static void
  1203. bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
  1204. u32 *dm_len)
  1205. {
  1206. if (cfg->drvcfg.min_cfg)
  1207. *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
  1208. else
  1209. *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
  1210. }
  1211. /*
  1212. * bfa module attach at initialization time
  1213. */
  1214. static void
  1215. bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
  1216. struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
  1217. {
  1218. struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
  1219. struct bfa_lps_s *lps;
  1220. int i;
  1221. memset(mod, 0, sizeof(struct bfa_lps_mod_s));
  1222. mod->num_lps = BFA_LPS_MAX_LPORTS;
  1223. if (cfg->drvcfg.min_cfg)
  1224. mod->num_lps = BFA_LPS_MIN_LPORTS;
  1225. else
  1226. mod->num_lps = BFA_LPS_MAX_LPORTS;
  1227. mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
  1228. bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
  1229. INIT_LIST_HEAD(&mod->lps_free_q);
  1230. INIT_LIST_HEAD(&mod->lps_active_q);
  1231. for (i = 0; i < mod->num_lps; i++, lps++) {
  1232. lps->bfa = bfa;
  1233. lps->lp_tag = (u8) i;
  1234. lps->reqq = BFA_REQQ_LPS;
  1235. bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
  1236. list_add_tail(&lps->qe, &mod->lps_free_q);
  1237. }
  1238. }
/*
 * Module detach entry -- nothing to undo for the lps module.
 */
static void
bfa_lps_detach(struct bfa_s *bfa)
{
}
/*
 * Module start entry -- no per-start work for the lps module.
 */
static void
bfa_lps_start(struct bfa_s *bfa)
{
}
/*
 * Module stop entry -- no per-stop work for the lps module.
 */
static void
bfa_lps_stop(struct bfa_s *bfa)
{
}
  1251. /*
  1252. * IOC in disabled state -- consider all lps offline
  1253. */
  1254. static void
  1255. bfa_lps_iocdisable(struct bfa_s *bfa)
  1256. {
  1257. struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
  1258. struct bfa_lps_s *lps;
  1259. struct list_head *qe, *qen;
  1260. list_for_each_safe(qe, qen, &mod->lps_active_q) {
  1261. lps = (struct bfa_lps_s *) qe;
  1262. bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
  1263. }
  1264. }
  1265. /*
  1266. * Firmware login response
  1267. */
/*
 * Firmware login response: copy the result fields relevant to the
 * outcome into the lps, then drive the state machine.
 */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	/* Validate the firmware-supplied tag before indexing. */
	bfa_assert(rsp->lp_tag < mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		/* Login accepted: capture the fabric parameters. */
		lps->fport	= rsp->f_port;
		lps->npiv_en	= rsp->npiv_en;
		lps->lp_pid	= rsp->lp_pid;
		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
		lps->pr_pwwn	= rsp->port_name;
		lps->pr_nwwn	= rsp->node_name;
		lps->auth_req	= rsp->auth_req;
		lps->lp_mac	= rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac	= rsp->fcf_mac;

		break;

	case BFA_STATUS_FABRIC_RJT:
		/* Fabric rejected the login: save reason/explanation. */
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;

		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
  1302. /*
  1303. * Firmware logout response
  1304. */
  1305. static void
  1306. bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
  1307. {
  1308. struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
  1309. struct bfa_lps_s *lps;
  1310. bfa_assert(rsp->lp_tag < mod->num_lps);
  1311. lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
  1312. bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
  1313. }
  1314. /*
  1315. * Firmware received a Clear virtual link request (for FCoE)
  1316. */
  1317. static void
  1318. bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
  1319. {
  1320. struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
  1321. struct bfa_lps_s *lps;
  1322. lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
  1323. bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
  1324. }
  1325. /*
  1326. * Space is available in request queue, resume queueing request to firmware.
  1327. */
  1328. static void
  1329. bfa_lps_reqq_resume(void *lps_arg)
  1330. {
  1331. struct bfa_lps_s *lps = lps_arg;
  1332. bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
  1333. }
  1334. /*
  1335. * lps is freed -- triggered by vport delete
  1336. */
  1337. static void
  1338. bfa_lps_free(struct bfa_lps_s *lps)
  1339. {
  1340. struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
  1341. lps->lp_pid = 0;
  1342. list_del(&lps->qe);
  1343. list_add_tail(&lps->qe, &mod->lps_free_q);
  1344. }
  1345. /*
  1346. * send login request to firmware
  1347. */
  1348. static void
  1349. bfa_lps_send_login(struct bfa_lps_s *lps)
  1350. {
  1351. struct bfi_lps_login_req_s *m;
  1352. m = bfa_reqq_next(lps->bfa, lps->reqq);
  1353. bfa_assert(m);
  1354. bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
  1355. bfa_lpuid(lps->bfa));
  1356. m->lp_tag = lps->lp_tag;
  1357. m->alpa = lps->alpa;
  1358. m->pdu_size = cpu_to_be16(lps->pdusz);
  1359. m->pwwn = lps->pwwn;
  1360. m->nwwn = lps->nwwn;
  1361. m->fdisc = lps->fdisc;
  1362. m->auth_en = lps->auth_en;
  1363. bfa_reqq_produce(lps->bfa, lps->reqq);
  1364. }
  1365. /*
  1366. * send logout request to firmware
  1367. */
  1368. static void
  1369. bfa_lps_send_logout(struct bfa_lps_s *lps)
  1370. {
  1371. struct bfi_lps_logout_req_s *m;
  1372. m = bfa_reqq_next(lps->bfa, lps->reqq);
  1373. bfa_assert(m);
  1374. bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
  1375. bfa_lpuid(lps->bfa));
  1376. m->lp_tag = lps->lp_tag;
  1377. m->port_name = lps->pwwn;
  1378. bfa_reqq_produce(lps->bfa, lps->reqq);
  1379. }
  1380. /*
  1381. * Indirect login completion handler for non-fcs
  1382. */
  1383. static void
  1384. bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
  1385. {
  1386. struct bfa_lps_s *lps = arg;
  1387. if (!complete)
  1388. return;
  1389. if (lps->fdisc)
  1390. bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
  1391. else
  1392. bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
  1393. }
  1394. /*
  1395. * Login completion handler -- direct call for fcs, queue for others
  1396. */
  1397. static void
  1398. bfa_lps_login_comp(struct bfa_lps_s *lps)
  1399. {
  1400. if (!lps->bfa->fcs) {
  1401. bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
  1402. lps);
  1403. return;
  1404. }
  1405. if (lps->fdisc)
  1406. bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
  1407. else
  1408. bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
  1409. }
  1410. /*
  1411. * Indirect logout completion handler for non-fcs
  1412. */
  1413. static void
  1414. bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
  1415. {
  1416. struct bfa_lps_s *lps = arg;
  1417. if (!complete)
  1418. return;
  1419. if (lps->fdisc)
  1420. bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
  1421. }
  1422. /*
  1423. * Logout completion handler -- direct call for fcs, queue for others
  1424. */
  1425. static void
  1426. bfa_lps_logout_comp(struct bfa_lps_s *lps)
  1427. {
  1428. if (!lps->bfa->fcs) {
  1429. bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
  1430. lps);
  1431. return;
  1432. }
  1433. if (lps->fdisc)
  1434. bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
  1435. }
  1436. /*
  1437. * Clear virtual link completion handler for non-fcs
  1438. */
  1439. static void
  1440. bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
  1441. {
  1442. struct bfa_lps_s *lps = arg;
  1443. if (!complete)
  1444. return;
  1445. /* Clear virtual link to base port will result in link down */
  1446. if (lps->fdisc)
  1447. bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
  1448. }
  1449. /*
  1450. * Received Clear virtual link event --direct call for fcs,
  1451. * queue for others
  1452. */
  1453. static void
  1454. bfa_lps_cvl_event(struct bfa_lps_s *lps)
  1455. {
  1456. if (!lps->bfa->fcs) {
  1457. bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
  1458. lps);
  1459. return;
  1460. }
  1461. /* Clear virtual link to base port will result in link down */
  1462. if (lps->fdisc)
  1463. bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
  1464. }
  1465. /*
  1466. * lps_public BFA LPS public functions
  1467. */
  1468. u32
  1469. bfa_lps_get_max_vport(struct bfa_s *bfa)
  1470. {
  1471. if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
  1472. return BFA_LPS_MAX_VPORTS_SUPP_CT;
  1473. else
  1474. return BFA_LPS_MAX_VPORTS_SUPP_CB;
  1475. }
  1476. /*
  1477. * Allocate a lport srvice tag.
  1478. */
  1479. struct bfa_lps_s *
  1480. bfa_lps_alloc(struct bfa_s *bfa)
  1481. {
  1482. struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
  1483. struct bfa_lps_s *lps = NULL;
  1484. bfa_q_deq(&mod->lps_free_q, &lps);
  1485. if (lps == NULL)
  1486. return NULL;
  1487. list_add_tail(&lps->qe, &mod->lps_active_q);
  1488. bfa_sm_set_state(lps, bfa_lps_sm_init);
  1489. return lps;
  1490. }
  1491. /*
  1492. * Free lport service tag. This can be called anytime after an alloc.
  1493. * No need to wait for any pending login/logout completions.
  1494. */
  1495. void
  1496. bfa_lps_delete(struct bfa_lps_s *lps)
  1497. {
  1498. bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
  1499. }
  1500. /*
  1501. * Initiate a lport login.
  1502. */
  1503. void
  1504. bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
  1505. wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
  1506. {
  1507. lps->uarg = uarg;
  1508. lps->alpa = alpa;
  1509. lps->pdusz = pdusz;
  1510. lps->pwwn = pwwn;
  1511. lps->nwwn = nwwn;
  1512. lps->fdisc = BFA_FALSE;
  1513. lps->auth_en = auth_en;
  1514. bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
  1515. }
  1516. /*
  1517. * Initiate a lport fdisc login.
  1518. */
  1519. void
  1520. bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
  1521. wwn_t nwwn)
  1522. {
  1523. lps->uarg = uarg;
  1524. lps->alpa = 0;
  1525. lps->pdusz = pdusz;
  1526. lps->pwwn = pwwn;
  1527. lps->nwwn = nwwn;
  1528. lps->fdisc = BFA_TRUE;
  1529. lps->auth_en = BFA_FALSE;
  1530. bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
  1531. }
  1532. /*
  1533. * Initiate a lport FDSIC logout.
  1534. */
  1535. void
  1536. bfa_lps_fdisclogo(struct bfa_lps_s *lps)
  1537. {
  1538. bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
  1539. }
  1540. /*
  1541. * Return lport services tag given the pid
  1542. */
  1543. u8
  1544. bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
  1545. {
  1546. struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
  1547. struct bfa_lps_s *lps;
  1548. int i;
  1549. for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
  1550. if (lps->lp_pid == pid)
  1551. return lps->lp_tag;
  1552. }
  1553. /* Return base port tag anyway */
  1554. return 0;
  1555. }
  1556. /*
  1557. * return port id assigned to the base lport
  1558. */
  1559. u32
  1560. bfa_lps_get_base_pid(struct bfa_s *bfa)
  1561. {
  1562. struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
  1563. return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
  1564. }
  1565. /*
  1566. * LPS firmware message class handler.
  1567. */
  1568. void
  1569. bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
  1570. {
  1571. union bfi_lps_i2h_msg_u msg;
  1572. bfa_trc(bfa, m->mhdr.msg_id);
  1573. msg.msg = m;
  1574. switch (m->mhdr.msg_id) {
  1575. case BFI_LPS_H2I_LOGIN_RSP:
  1576. bfa_lps_login_rsp(bfa, msg.login_rsp);
  1577. break;
  1578. case BFI_LPS_H2I_LOGOUT_RSP:
  1579. bfa_lps_logout_rsp(bfa, msg.logout_rsp);
  1580. break;
  1581. case BFI_LPS_H2I_CVL_EVENT:
  1582. bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
  1583. break;
  1584. default:
  1585. bfa_trc(bfa, m->mhdr.msg_id);
  1586. bfa_assert(0);
  1587. }
  1588. }
  1589. /*
  1590. * FC PORT state machine functions
  1591. */
  1592. static void
  1593. bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
  1594. enum bfa_fcport_sm_event event)
  1595. {
  1596. bfa_trc(fcport->bfa, event);
  1597. switch (event) {
  1598. case BFA_FCPORT_SM_START:
  1599. /*
  1600. * Start event after IOC is configured and BFA is started.
  1601. */
  1602. if (bfa_fcport_send_enable(fcport)) {
  1603. bfa_trc(fcport->bfa, BFA_TRUE);
  1604. bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
  1605. } else {
  1606. bfa_trc(fcport->bfa, BFA_FALSE);
  1607. bfa_sm_set_state(fcport,
  1608. bfa_fcport_sm_enabling_qwait);
  1609. }
  1610. break;
  1611. case BFA_FCPORT_SM_ENABLE:
  1612. /*
  1613. * Port is persistently configured to be in enabled state. Do
  1614. * not change state. Port enabling is done when START event is
  1615. * received.
  1616. */
  1617. break;
  1618. case BFA_FCPORT_SM_DISABLE:
  1619. /*
  1620. * If a port is persistently configured to be disabled, the
  1621. * first event will a port disable request.
  1622. */
  1623. bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
  1624. break;
  1625. case BFA_FCPORT_SM_HWFAIL:
  1626. bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
  1627. break;
  1628. default:
  1629. bfa_sm_fault(fcport->bfa, event);
  1630. }
  1631. }
/*
 * State: enable requested but request queue was full; waiting for
 * QRESUME to send the enable to firmware.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* queue space available -- issue the pending enable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * State: enable request sent to firmware; waiting for the firmware
 * response or a link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		/* enabled but no link yet */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		bfa_assert(fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * State: port enabled, link down.  A LINKUP event transitions to the
 * linkup state and raises the SCN; FCoE adapters additionally log FIP
 * FCF discovery results.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		bfa_assert(fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		/* non-fcmode == FCoE: record FIP FCF discovery outcome */
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipfailed);
			if (pevent->link_state.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * State: port enabled and link up.  Every exit path (disable, linkdown,
 * stop, hwfail) resets the cached link info; logging distinguishes an
 * administrative offline from lost fabric connectivity.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * State: disable requested but request queue was full; waiting for
 * QRESUME to send the disable to firmware.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* queue space available -- issue the pending disable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* enable while a disable is queued: toggle on resume */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * State: a disable is queue-waiting and an enable arrived on top of it.
 * On QRESUME both the disable and the enable are issued back-to-back.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* send the pending disable, then immediately re-enable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* already toggling back to enabled */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* enable cancelled -- fall back to a plain disable wait */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * State: disable request sent to firmware; waiting for the response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * State: port administratively disabled; only ENABLE (or STOP/HWFAIL)
 * moves it out of this state.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * State: port stopped.  Only a (re)START re-enables the port; all other
 * events are deliberately ignored (no bfa_sm_fault).
 */
static void
bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all other events.
		 */
		;
	}
}
  2046. /*
  2047. * Port is enabled. IOC is down/failed.
  2048. */
  2049. static void
  2050. bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
  2051. enum bfa_fcport_sm_event event)
  2052. {
  2053. bfa_trc(fcport->bfa, event);
  2054. switch (event) {
  2055. case BFA_FCPORT_SM_START:
  2056. if (bfa_fcport_send_enable(fcport))
  2057. bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
  2058. else
  2059. bfa_sm_set_state(fcport,
  2060. bfa_fcport_sm_enabling_qwait);
  2061. break;
  2062. default:
  2063. /*
  2064. * Ignore all events.
  2065. */
  2066. ;
  2067. }
  2068. }
  2069. /*
  2070. * Port is disabled. IOC is down/failed.
  2071. */
  2072. static void
  2073. bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
  2074. enum bfa_fcport_sm_event event)
  2075. {
  2076. bfa_trc(fcport->bfa, event);
  2077. switch (event) {
  2078. case BFA_FCPORT_SM_START:
  2079. bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
  2080. break;
  2081. case BFA_FCPORT_SM_ENABLE:
  2082. bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
  2083. break;
  2084. default:
  2085. /*
  2086. * Ignore all events.
  2087. */
  2088. ;
  2089. }
  2090. }
  2091. /*
  2092. * Link state is down
  2093. */
  2094. static void
  2095. bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
  2096. enum bfa_fcport_ln_sm_event event)
  2097. {
  2098. bfa_trc(ln->fcport->bfa, event);
  2099. switch (event) {
  2100. case BFA_FCPORT_LN_SM_LINKUP:
  2101. bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
  2102. bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
  2103. break;
  2104. default:
  2105. bfa_sm_fault(ln->fcport->bfa, event);
  2106. }
  2107. }
  2108. /*
  2109. * Link state is waiting for down notification
  2110. */
  2111. static void
  2112. bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
  2113. enum bfa_fcport_ln_sm_event event)
  2114. {
  2115. bfa_trc(ln->fcport->bfa, event);
  2116. switch (event) {
  2117. case BFA_FCPORT_LN_SM_LINKUP:
  2118. bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
  2119. break;
  2120. case BFA_FCPORT_LN_SM_NOTIFICATION:
  2121. bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
  2122. break;
  2123. default:
  2124. bfa_sm_fault(ln->fcport->bfa, event);
  2125. }
  2126. }
  2127. /*
  2128. * Link state is waiting for down notification and there is a pending up
  2129. */
  2130. static void
  2131. bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
  2132. enum bfa_fcport_ln_sm_event event)
  2133. {
  2134. bfa_trc(ln->fcport->bfa, event);
  2135. switch (event) {
  2136. case BFA_FCPORT_LN_SM_LINKDOWN:
  2137. bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
  2138. break;
  2139. case BFA_FCPORT_LN_SM_NOTIFICATION:
  2140. bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
  2141. bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
  2142. break;
  2143. default:
  2144. bfa_sm_fault(ln->fcport->bfa, event);
  2145. }
  2146. }
  2147. /*
  2148. * Link state is up
  2149. */
  2150. static void
  2151. bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
  2152. enum bfa_fcport_ln_sm_event event)
  2153. {
  2154. bfa_trc(ln->fcport->bfa, event);
  2155. switch (event) {
  2156. case BFA_FCPORT_LN_SM_LINKDOWN:
  2157. bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
  2158. bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
  2159. break;
  2160. default:
  2161. bfa_sm_fault(ln->fcport->bfa, event);
  2162. }
  2163. }
  2164. /*
  2165. * Link state is waiting for up notification
  2166. */
  2167. static void
  2168. bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
  2169. enum bfa_fcport_ln_sm_event event)
  2170. {
  2171. bfa_trc(ln->fcport->bfa, event);
  2172. switch (event) {
  2173. case BFA_FCPORT_LN_SM_LINKDOWN:
  2174. bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
  2175. break;
  2176. case BFA_FCPORT_LN_SM_NOTIFICATION:
  2177. bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
  2178. break;
  2179. default:
  2180. bfa_sm_fault(ln->fcport->bfa, event);
  2181. }
  2182. }
  2183. /*
  2184. * Link state is waiting for up notification and there is a pending down
  2185. */
  2186. static void
  2187. bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
  2188. enum bfa_fcport_ln_sm_event event)
  2189. {
  2190. bfa_trc(ln->fcport->bfa, event);
  2191. switch (event) {
  2192. case BFA_FCPORT_LN_SM_LINKUP:
  2193. bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
  2194. break;
  2195. case BFA_FCPORT_LN_SM_NOTIFICATION:
  2196. bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
  2197. bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
  2198. break;
  2199. default:
  2200. bfa_sm_fault(ln->fcport->bfa, event);
  2201. }
  2202. }
  2203. /*
  2204. * Link state is waiting for up notification and there are pending down and up
  2205. */
  2206. static void
  2207. bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
  2208. enum bfa_fcport_ln_sm_event event)
  2209. {
  2210. bfa_trc(ln->fcport->bfa, event);
  2211. switch (event) {
  2212. case BFA_FCPORT_LN_SM_LINKDOWN:
  2213. bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
  2214. break;
  2215. case BFA_FCPORT_LN_SM_NOTIFICATION:
  2216. bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
  2217. bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
  2218. break;
  2219. default:
  2220. bfa_sm_fault(ln->fcport->bfa, event);
  2221. }
  2222. }
  2223. static void
  2224. __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
  2225. {
  2226. struct bfa_fcport_ln_s *ln = cbarg;
  2227. if (complete)
  2228. ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
  2229. else
  2230. bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
  2231. }
  2232. /*
  2233. * Send SCN notification to upper layers.
  2234. * trunk - false if caller is fcport to ignore fcport event in trunked mode
  2235. */
  2236. static void
  2237. bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
  2238. bfa_boolean_t trunk)
  2239. {
  2240. if (fcport->cfg.trunked && !trunk)
  2241. return;
  2242. switch (event) {
  2243. case BFA_PORT_LINKUP:
  2244. bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
  2245. break;
  2246. case BFA_PORT_LINKDOWN:
  2247. bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
  2248. break;
  2249. default:
  2250. bfa_assert(0);
  2251. }
  2252. }
  2253. static void
  2254. bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
  2255. {
  2256. struct bfa_fcport_s *fcport = ln->fcport;
  2257. if (fcport->bfa->fcs) {
  2258. fcport->event_cbfn(fcport->event_cbarg, event);
  2259. bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
  2260. } else {
  2261. ln->ln_event = event;
  2262. bfa_cb_queue(fcport->bfa, &ln->ln_qe,
  2263. __bfa_cb_fcport_event, ln);
  2264. }
  2265. }
/* DMA footprint of the fcport stats block, rounded up to a cache line */
#define FCPORT_STATS_DMA_SZ	(BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
				BFA_CACHELINE_SZ))

/*
 * Report the fcport module's memory needs: only DMA memory for the
 * firmware-written stats block; no non-DMA memory is requested.
 */
static void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
		u32 *dm_len)
{
	*dm_len += FCPORT_STATS_DMA_SZ;
}
  2274. static void
  2275. bfa_fcport_qresume(void *cbarg)
  2276. {
  2277. struct bfa_fcport_s *fcport = cbarg;
  2278. bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
  2279. }
  2280. static void
  2281. bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
  2282. {
  2283. u8 *dm_kva;
  2284. u64 dm_pa;
  2285. dm_kva = bfa_meminfo_dma_virt(meminfo);
  2286. dm_pa = bfa_meminfo_dma_phys(meminfo);
  2287. fcport->stats_kva = dm_kva;
  2288. fcport->stats_pa = dm_pa;
  2289. fcport->stats = (union bfa_fcport_stats_u *) dm_kva;
  2290. dm_kva += FCPORT_STATS_DMA_SZ;
  2291. dm_pa += FCPORT_STATS_DMA_SZ;
  2292. bfa_meminfo_dma_virt(meminfo) = dm_kva;
  2293. bfa_meminfo_dma_phys(meminfo) = dm_pa;
  2294. }
  2295. /*
  2296. * Memory initialization.
  2297. */
  2298. static void
  2299. bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
  2300. struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
  2301. {
  2302. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2303. struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
  2304. struct bfa_fcport_ln_s *ln = &fcport->ln;
  2305. struct timeval tv;
  2306. memset(fcport, 0, sizeof(struct bfa_fcport_s));
  2307. fcport->bfa = bfa;
  2308. ln->fcport = fcport;
  2309. bfa_fcport_mem_claim(fcport, meminfo);
  2310. bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
  2311. bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
  2312. /*
  2313. * initialize time stamp for stats reset
  2314. */
  2315. do_gettimeofday(&tv);
  2316. fcport->stats_reset_time = tv.tv_sec;
  2317. /*
  2318. * initialize and set default configuration
  2319. */
  2320. port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
  2321. port_cfg->speed = BFA_PORT_SPEED_AUTO;
  2322. port_cfg->trunked = BFA_FALSE;
  2323. port_cfg->maxfrsize = 0;
  2324. port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
  2325. bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
  2326. }
/*
 * Module detach entry point - fcport has nothing to tear down.
 */
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
}
  2331. /*
  2332. * Called when IOC is ready.
  2333. */
  2334. static void
  2335. bfa_fcport_start(struct bfa_s *bfa)
  2336. {
  2337. bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
  2338. }
  2339. /*
  2340. * Called before IOC is stopped.
  2341. */
  2342. static void
  2343. bfa_fcport_stop(struct bfa_s *bfa)
  2344. {
  2345. bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
  2346. bfa_trunk_iocdisable(bfa);
  2347. }
  2348. /*
  2349. * Called when IOC failure is detected.
  2350. */
  2351. static void
  2352. bfa_fcport_iocdisable(struct bfa_s *bfa)
  2353. {
  2354. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2355. bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
  2356. bfa_trunk_iocdisable(bfa);
  2357. }
/*
 * Cache the link attributes carried in the firmware link-state event so
 * later queries (speed/topology/QoS/FCoE VLAN) reflect the live link.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	/* loop topology: reset the cached ALPA */
	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
		fcport->myalpa = 0;

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific */
	fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
  2380. static void
  2381. bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
  2382. {
  2383. fcport->speed = BFA_PORT_SPEED_UNKNOWN;
  2384. fcport->topology = BFA_PORT_TOPOLOGY_NONE;
  2385. }
  2386. /*
  2387. * Send port enable message to firmware.
  2388. */
  2389. static bfa_boolean_t
  2390. bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
  2391. {
  2392. struct bfi_fcport_enable_req_s *m;
  2393. /*
  2394. * Increment message tag before queue check, so that responses to old
  2395. * requests are discarded.
  2396. */
  2397. fcport->msgtag++;
  2398. /*
  2399. * check for room in queue to send request now
  2400. */
  2401. m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
  2402. if (!m) {
  2403. bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
  2404. &fcport->reqq_wait);
  2405. return BFA_FALSE;
  2406. }
  2407. bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
  2408. bfa_lpuid(fcport->bfa));
  2409. m->nwwn = fcport->nwwn;
  2410. m->pwwn = fcport->pwwn;
  2411. m->port_cfg = fcport->cfg;
  2412. m->msgtag = fcport->msgtag;
  2413. m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
  2414. bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
  2415. bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
  2416. bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
  2417. /*
  2418. * queue I/O message to firmware
  2419. */
  2420. bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
  2421. return BFA_TRUE;
  2422. }
  2423. /*
  2424. * Send port disable message to firmware.
  2425. */
  2426. static bfa_boolean_t
  2427. bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
  2428. {
  2429. struct bfi_fcport_req_s *m;
  2430. /*
  2431. * Increment message tag before queue check, so that responses to old
  2432. * requests are discarded.
  2433. */
  2434. fcport->msgtag++;
  2435. /*
  2436. * check for room in queue to send request now
  2437. */
  2438. m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
  2439. if (!m) {
  2440. bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
  2441. &fcport->reqq_wait);
  2442. return BFA_FALSE;
  2443. }
  2444. bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
  2445. bfa_lpuid(fcport->bfa));
  2446. m->msgtag = fcport->msgtag;
  2447. /*
  2448. * queue I/O message to firmware
  2449. */
  2450. bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
  2451. return BFA_TRUE;
  2452. }
  2453. static void
  2454. bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
  2455. {
  2456. fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
  2457. fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
  2458. bfa_trc(fcport->bfa, fcport->pwwn);
  2459. bfa_trc(fcport->bfa, fcport->nwwn);
  2460. }
/*
 * Push the configured transmit BB_credit to firmware. Best effort: if
 * the request queue is full the update is dropped (only traced).
 */
static void
bfa_fcport_send_txcredit(void *port_cbarg)
{
	struct bfa_fcport_s *fcport = port_cbarg;
	struct bfi_fcport_set_svc_params_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
		    bfa_lpuid(fcport->bfa));
	m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
  2482. static void
  2483. bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
  2484. struct bfa_qos_stats_s *s)
  2485. {
  2486. u32 *dip = (u32 *) d;
  2487. __be32 *sip = (__be32 *) s;
  2488. int i;
  2489. /* Now swap the 32 bit fields */
  2490. for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
  2491. dip[i] = be32_to_cpu(sip[i]);
  2492. }
/*
 * Convert a firmware (big-endian) FCoE statistics block to host order.
 * The counters are 64-bit quantities handled as pairs of 32-bit words:
 * each word is byte-swapped, and on little-endian hosts the two words
 * of each pair are exchanged as well.
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
			   struct bfa_fcoe_stats_s *s)
{
	u32 *dip = (u32 *) d;
	__be32 *sip = (__be32 *) s;
	int i;

	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
	     i = i + 2) {
#ifdef __BIG_ENDIAN
		/* word order already correct; swap bytes only */
		dip[i] = be32_to_cpu(sip[i]);
		dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
		/* swap bytes and exchange the high/low words of the pair */
		dip[i] = be32_to_cpu(sip[i + 1]);
		dip[i + 1] = be32_to_cpu(sip[i]);
#endif
	}
}
/*
 * Completion handler for a statistics fetch: byte-swap the DMA'ed stats
 * into the caller's buffer and invoke the caller's callback.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		if (fcport->stats_status == BFA_STATUS_OK) {
			struct timeval tv;

			/* Swap FC QoS or FCoE stats */
			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
				bfa_fcport_qos_stats_swap(
					&fcport->stats_ret->fcqos,
					&fcport->stats->fcqos);
			} else {
				bfa_fcport_fcoe_stats_swap(
					&fcport->stats_ret->fcoe,
					&fcport->stats->fcoe);

				/* seconds since the last stats reset */
				do_gettimeofday(&tv);
				fcport->stats_ret->fcoe.secs_reset =
					tv.tv_sec - fcport->stats_reset_time;
			}
		}
		/*
		 * NOTE(review): stats_busy is not cleared on this path --
		 * verify the callback chain is responsible for resetting it.
		 */
		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		/* canceled: release the busy flag for the next requester */
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
  2538. static void
  2539. bfa_fcport_stats_get_timeout(void *cbarg)
  2540. {
  2541. struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
  2542. bfa_trc(fcport->bfa, fcport->stats_qfull);
  2543. if (fcport->stats_qfull) {
  2544. bfa_reqq_wcancel(&fcport->stats_reqq_wait);
  2545. fcport->stats_qfull = BFA_FALSE;
  2546. }
  2547. fcport->stats_status = BFA_STATUS_ETIMER;
  2548. bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
  2549. fcport);
  2550. }
/*
 * Issue a STATS_GET request to firmware. On a full request queue, park
 * on the queue (stats_qfull) and retry from this same function when a
 * slot frees up.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		/* re-arm the wait element with ourselves as the resume fn */
		bfa_reqq_winit(&fcport->stats_reqq_wait,
			       bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			      &fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
		    bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
/*
 * Completion handler for a statistics clear: restart the reset
 * timestamp and invoke the caller's callback.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		struct timeval tv;

		/*
		 * re-initialize time stamp for stats reset
		 */
		do_gettimeofday(&tv);
		fcport->stats_reset_time = tv.tv_sec;

		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		/* canceled: release the busy flag for the next requester */
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
  2588. static void
  2589. bfa_fcport_stats_clr_timeout(void *cbarg)
  2590. {
  2591. struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
  2592. bfa_trc(fcport->bfa, fcport->stats_qfull);
  2593. if (fcport->stats_qfull) {
  2594. bfa_reqq_wcancel(&fcport->stats_reqq_wait);
  2595. fcport->stats_qfull = BFA_FALSE;
  2596. }
  2597. fcport->stats_status = BFA_STATUS_ETIMER;
  2598. bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
  2599. __bfa_cb_fcport_stats_clr, fcport);
  2600. }
/*
 * Issue a STATS_CLEAR request to firmware. On a full request queue,
 * park on the queue (stats_qfull) and retry from this same function
 * when a slot frees up.
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		/* re-arm the wait element with ourselves as the resume fn */
		bfa_reqq_winit(&fcport->stats_reqq_wait,
			       bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			      &fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
		    bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
  2621. /*
  2622. * Handle trunk SCN event from firmware.
  2623. */
  2624. static void
  2625. bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
  2626. {
  2627. struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
  2628. struct bfi_fcport_trunk_link_s *tlink;
  2629. struct bfa_trunk_link_attr_s *lattr;
  2630. enum bfa_trunk_state state_prev;
  2631. int i;
  2632. int link_bm = 0;
  2633. bfa_trc(fcport->bfa, fcport->cfg.trunked);
  2634. bfa_assert(scn->trunk_state == BFA_TRUNK_ONLINE ||
  2635. scn->trunk_state == BFA_TRUNK_OFFLINE);
  2636. bfa_trc(fcport->bfa, trunk->attr.state);
  2637. bfa_trc(fcport->bfa, scn->trunk_state);
  2638. bfa_trc(fcport->bfa, scn->trunk_speed);
  2639. /*
  2640. * Save off new state for trunk attribute query
  2641. */
  2642. state_prev = trunk->attr.state;
  2643. if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
  2644. trunk->attr.state = scn->trunk_state;
  2645. trunk->attr.speed = scn->trunk_speed;
  2646. for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
  2647. lattr = &trunk->attr.link_attr[i];
  2648. tlink = &scn->tlink[i];
  2649. lattr->link_state = tlink->state;
  2650. lattr->trunk_wwn = tlink->trunk_wwn;
  2651. lattr->fctl = tlink->fctl;
  2652. lattr->speed = tlink->speed;
  2653. lattr->deskew = be32_to_cpu(tlink->deskew);
  2654. if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
  2655. fcport->speed = tlink->speed;
  2656. fcport->topology = BFA_PORT_TOPOLOGY_P2P;
  2657. link_bm |= 1 << i;
  2658. }
  2659. bfa_trc(fcport->bfa, lattr->link_state);
  2660. bfa_trc(fcport->bfa, lattr->trunk_wwn);
  2661. bfa_trc(fcport->bfa, lattr->fctl);
  2662. bfa_trc(fcport->bfa, lattr->speed);
  2663. bfa_trc(fcport->bfa, lattr->deskew);
  2664. }
  2665. switch (link_bm) {
  2666. case 3:
  2667. bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
  2668. BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
  2669. break;
  2670. case 2:
  2671. bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
  2672. BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
  2673. break;
  2674. case 1:
  2675. bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
  2676. BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
  2677. break;
  2678. default:
  2679. bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
  2680. BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
  2681. }
  2682. /*
  2683. * Notify upper layers if trunk state changed.
  2684. */
  2685. if ((state_prev != trunk->attr.state) ||
  2686. (scn->trunk_state == BFA_TRUNK_OFFLINE)) {
  2687. bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
  2688. BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
  2689. }
  2690. }
/*
 * IOC went down: take the trunk offline, notify upper layers of the
 * link loss, and reset all per-link trunk attributes to defaults.
 */
static void
bfa_trunk_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	int i = 0;

	/*
	 * In trunked mode, notify upper layers that link is down
	 */
	if (fcport->cfg.trunked) {
		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);

		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
			fcport->trunk.attr.link_attr[i].fctl =
				BFA_TRUNK_LINK_FCTL_NORMAL;
			fcport->trunk.attr.link_attr[i].link_state =
				BFA_TRUNK_LINK_STATE_DN_LINKDN;
			fcport->trunk.attr.link_attr[i].speed =
				BFA_PORT_SPEED_UNKNOWN;
			fcport->trunk.attr.link_attr[i].deskew = 0;
		}
	}
}
  2716. /*
  2717. * Called to initialize port attributes
  2718. */
  2719. void
  2720. bfa_fcport_init(struct bfa_s *bfa)
  2721. {
  2722. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2723. /*
  2724. * Initialize port attributes from IOC hardware data.
  2725. */
  2726. bfa_fcport_set_wwns(fcport);
  2727. if (fcport->cfg.maxfrsize == 0)
  2728. fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
  2729. fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
  2730. fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
  2731. bfa_assert(fcport->cfg.maxfrsize);
  2732. bfa_assert(fcport->cfg.rx_bbcredit);
  2733. bfa_assert(fcport->speed_sup);
  2734. }
  2735. /*
  2736. * Firmware message handler.
  2737. */
  2738. void
  2739. bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
  2740. {
  2741. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2742. union bfi_fcport_i2h_msg_u i2hmsg;
  2743. i2hmsg.msg = msg;
  2744. fcport->event_arg.i2hmsg = i2hmsg;
  2745. bfa_trc(bfa, msg->mhdr.msg_id);
  2746. bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
  2747. switch (msg->mhdr.msg_id) {
  2748. case BFI_FCPORT_I2H_ENABLE_RSP:
  2749. if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
  2750. bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
  2751. break;
  2752. case BFI_FCPORT_I2H_DISABLE_RSP:
  2753. if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
  2754. bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
  2755. break;
  2756. case BFI_FCPORT_I2H_EVENT:
  2757. if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
  2758. bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
  2759. else
  2760. bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
  2761. break;
  2762. case BFI_FCPORT_I2H_TRUNK_SCN:
  2763. bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
  2764. break;
  2765. case BFI_FCPORT_I2H_STATS_GET_RSP:
  2766. /*
  2767. * check for timer pop before processing the rsp
  2768. */
  2769. if (fcport->stats_busy == BFA_FALSE ||
  2770. fcport->stats_status == BFA_STATUS_ETIMER)
  2771. break;
  2772. bfa_timer_stop(&fcport->timer);
  2773. fcport->stats_status = i2hmsg.pstatsget_rsp->status;
  2774. bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
  2775. __bfa_cb_fcport_stats_get, fcport);
  2776. break;
  2777. case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
  2778. /*
  2779. * check for timer pop before processing the rsp
  2780. */
  2781. if (fcport->stats_busy == BFA_FALSE ||
  2782. fcport->stats_status == BFA_STATUS_ETIMER)
  2783. break;
  2784. bfa_timer_stop(&fcport->timer);
  2785. fcport->stats_status = BFA_STATUS_OK;
  2786. bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
  2787. __bfa_cb_fcport_stats_clr, fcport);
  2788. break;
  2789. case BFI_FCPORT_I2H_ENABLE_AEN:
  2790. bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
  2791. break;
  2792. case BFI_FCPORT_I2H_DISABLE_AEN:
  2793. bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
  2794. break;
  2795. default:
  2796. bfa_assert(0);
  2797. break;
  2798. }
  2799. }
  2800. /*
  2801. * Registered callback for port events.
  2802. */
  2803. void
  2804. bfa_fcport_event_register(struct bfa_s *bfa,
  2805. void (*cbfn) (void *cbarg,
  2806. enum bfa_port_linkstate event),
  2807. void *cbarg)
  2808. {
  2809. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2810. fcport->event_cbfn = cbfn;
  2811. fcport->event_cbarg = cbarg;
  2812. }
  2813. bfa_status_t
  2814. bfa_fcport_enable(struct bfa_s *bfa)
  2815. {
  2816. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2817. if (bfa_ioc_is_disabled(&bfa->ioc))
  2818. return BFA_STATUS_IOC_DISABLED;
  2819. if (fcport->diag_busy)
  2820. return BFA_STATUS_DIAG_BUSY;
  2821. bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
  2822. return BFA_STATUS_OK;
  2823. }
  2824. bfa_status_t
  2825. bfa_fcport_disable(struct bfa_s *bfa)
  2826. {
  2827. if (bfa_ioc_is_disabled(&bfa->ioc))
  2828. return BFA_STATUS_IOC_DISABLED;
  2829. bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
  2830. return BFA_STATUS_OK;
  2831. }
  2832. /*
  2833. * Configure port speed.
  2834. */
  2835. bfa_status_t
  2836. bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
  2837. {
  2838. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2839. bfa_trc(bfa, speed);
  2840. if (fcport->cfg.trunked == BFA_TRUE)
  2841. return BFA_STATUS_TRUNK_ENABLED;
  2842. if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
  2843. bfa_trc(bfa, fcport->speed_sup);
  2844. return BFA_STATUS_UNSUPP_SPEED;
  2845. }
  2846. fcport->cfg.speed = speed;
  2847. return BFA_STATUS_OK;
  2848. }
  2849. /*
  2850. * Get current speed.
  2851. */
  2852. enum bfa_port_speed
  2853. bfa_fcport_get_speed(struct bfa_s *bfa)
  2854. {
  2855. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2856. return fcport->speed;
  2857. }
  2858. /*
  2859. * Configure port topology.
  2860. */
  2861. bfa_status_t
  2862. bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
  2863. {
  2864. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2865. bfa_trc(bfa, topology);
  2866. bfa_trc(bfa, fcport->cfg.topology);
  2867. switch (topology) {
  2868. case BFA_PORT_TOPOLOGY_P2P:
  2869. case BFA_PORT_TOPOLOGY_LOOP:
  2870. case BFA_PORT_TOPOLOGY_AUTO:
  2871. break;
  2872. default:
  2873. return BFA_STATUS_EINVAL;
  2874. }
  2875. fcport->cfg.topology = topology;
  2876. return BFA_STATUS_OK;
  2877. }
  2878. /*
  2879. * Get current topology.
  2880. */
  2881. enum bfa_port_topology
  2882. bfa_fcport_get_topology(struct bfa_s *bfa)
  2883. {
  2884. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2885. return fcport->topology;
  2886. }
  2887. bfa_status_t
  2888. bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
  2889. {
  2890. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2891. bfa_trc(bfa, alpa);
  2892. bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
  2893. bfa_trc(bfa, fcport->cfg.hardalpa);
  2894. fcport->cfg.cfg_hardalpa = BFA_TRUE;
  2895. fcport->cfg.hardalpa = alpa;
  2896. return BFA_STATUS_OK;
  2897. }
  2898. bfa_status_t
  2899. bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
  2900. {
  2901. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2902. bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
  2903. bfa_trc(bfa, fcport->cfg.hardalpa);
  2904. fcport->cfg.cfg_hardalpa = BFA_FALSE;
  2905. return BFA_STATUS_OK;
  2906. }
  2907. bfa_boolean_t
  2908. bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
  2909. {
  2910. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2911. *alpa = fcport->cfg.hardalpa;
  2912. return fcport->cfg.cfg_hardalpa;
  2913. }
  2914. u8
  2915. bfa_fcport_get_myalpa(struct bfa_s *bfa)
  2916. {
  2917. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2918. return fcport->myalpa;
  2919. }
  2920. bfa_status_t
  2921. bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
  2922. {
  2923. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2924. bfa_trc(bfa, maxfrsize);
  2925. bfa_trc(bfa, fcport->cfg.maxfrsize);
  2926. /* with in range */
  2927. if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
  2928. return BFA_STATUS_INVLD_DFSZ;
  2929. /* power of 2, if not the max frame size of 2112 */
  2930. if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
  2931. return BFA_STATUS_INVLD_DFSZ;
  2932. fcport->cfg.maxfrsize = maxfrsize;
  2933. return BFA_STATUS_OK;
  2934. }
  2935. u16
  2936. bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
  2937. {
  2938. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2939. return fcport->cfg.maxfrsize;
  2940. }
  2941. u8
  2942. bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
  2943. {
  2944. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2945. return fcport->cfg.rx_bbcredit;
  2946. }
/*
 * Set the transmit BB_credit and push it to firmware.
 * NOTE(review): the u16 argument is narrowed to the u8 config field, so
 * values above 255 are truncated -- confirm callers never exceed that.
 */
void
bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
	bfa_fcport_send_txcredit(fcport);
}
  2954. /*
  2955. * Get port attributes.
  2956. */
  2957. wwn_t
  2958. bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
  2959. {
  2960. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  2961. if (node)
  2962. return fcport->nwwn;
  2963. else
  2964. return fcport->pwwn;
  2965. }
/*
 * Fill *attr with a snapshot of the port: WWNs, configuration, speed,
 * topology, beacon and overall port state.
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	attr->factorypwwn = bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn = bfa->ioc.attr->mfg_nwwn;

	memcpy(&attr->pport_cfg, &fcport->cfg,
	       sizeof(struct bfa_port_cfg_s));

	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;
	attr->plog_enabled = (bfa_boolean_t)fcport->bfa->plog->plog_enabled;
	attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);

	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);

	/* IOC-level conditions override the port SM state */
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
	if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_IOCDIS;
	else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_FWMISMATCH;

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
/* guard timeout (ms) for a stats get/clear round trip to firmware */
#define BFA_FCPORT_STATS_TOV	1000

/*
 * Fetch port statistics (FCQoS or FCoE).
 * Only one stats operation may be in flight at a time (stats_busy);
 * cbfn is invoked when the DMA'ed stats have been copied into *stats.
 */
bfa_status_t
bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
		     bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fcport->stats_busy  = BFA_TRUE;
	fcport->stats_ret   = stats;
	fcport->stats_cbfn  = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_get(fcport);

	/* arm the guard timer in case firmware never answers */
	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
			fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
  3023. /*
  3024. * Reset port statistics (FCQoS or FCoE).
  3025. */
  3026. bfa_status_t
  3027. bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
  3028. {
  3029. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3030. if (fcport->stats_busy) {
  3031. bfa_trc(bfa, fcport->stats_busy);
  3032. return BFA_STATUS_DEVBUSY;
  3033. }
  3034. fcport->stats_busy = BFA_TRUE;
  3035. fcport->stats_cbfn = cbfn;
  3036. fcport->stats_cbarg = cbarg;
  3037. bfa_fcport_send_stats_clear(fcport);
  3038. bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
  3039. fcport, BFA_FCPORT_STATS_TOV);
  3040. return BFA_STATUS_OK;
  3041. }
  3042. /*
  3043. * Fetch port attributes.
  3044. */
  3045. bfa_boolean_t
  3046. bfa_fcport_is_disabled(struct bfa_s *bfa)
  3047. {
  3048. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3049. return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
  3050. BFA_PORT_ST_DISABLED;
  3051. }
  3052. bfa_boolean_t
  3053. bfa_fcport_is_ratelim(struct bfa_s *bfa)
  3054. {
  3055. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3056. return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
  3057. }
  3058. /*
  3059. * Get default minimum ratelim speed
  3060. */
  3061. enum bfa_port_speed
  3062. bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
  3063. {
  3064. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3065. bfa_trc(bfa, fcport->cfg.trl_def_speed);
  3066. return fcport->cfg.trl_def_speed;
  3067. }
/*
 * Link-up check: a non-trunked port must have its port state machine in
 * the linkup state; a trunked port is "up" when the trunk is online.
 */
bfa_boolean_t
bfa_fcport_is_linkup(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	return (!fcport->cfg.trunked &&
		bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
	       (fcport->cfg.trunked &&
		fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
}
  3077. bfa_boolean_t
  3078. bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
  3079. {
  3080. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3081. return fcport->cfg.qos_enabled;
  3082. }
  3083. /*
  3084. * Rport State machine functions
  3085. */
  3086. /*
  3087. * Beginning state, only online event expected.
  3088. */
  3089. static void
  3090. bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
  3091. {
  3092. bfa_trc(rp->bfa, rp->rport_tag);
  3093. bfa_trc(rp->bfa, event);
  3094. switch (event) {
  3095. case BFA_RPORT_SM_CREATE:
  3096. bfa_stats(rp, sm_un_cr);
  3097. bfa_sm_set_state(rp, bfa_rport_sm_created);
  3098. break;
  3099. default:
  3100. bfa_stats(rp, sm_un_unexp);
  3101. bfa_sm_fault(rp->bfa, event);
  3102. }
  3103. }
/*
 * Created state: waiting for ONLINE to push the rport to firmware, or
 * DELETE to give the object back.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* qfull variant parks until the request queue drains */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
  3131. /*
  3132. * Waiting for rport create response from firmware.
  3133. */
  3134. static void
  3135. bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
  3136. {
  3137. bfa_trc(rp->bfa, rp->rport_tag);
  3138. bfa_trc(rp->bfa, event);
  3139. switch (event) {
  3140. case BFA_RPORT_SM_FWRSP:
  3141. bfa_stats(rp, sm_fwc_rsp);
  3142. bfa_sm_set_state(rp, bfa_rport_sm_online);
  3143. bfa_rport_online_cb(rp);
  3144. break;
  3145. case BFA_RPORT_SM_DELETE:
  3146. bfa_stats(rp, sm_fwc_del);
  3147. bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
  3148. break;
  3149. case BFA_RPORT_SM_OFFLINE:
  3150. bfa_stats(rp, sm_fwc_off);
  3151. bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
  3152. break;
  3153. case BFA_RPORT_SM_HWFAIL:
  3154. bfa_stats(rp, sm_fwc_hwf);
  3155. bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
  3156. break;
  3157. default:
  3158. bfa_stats(rp, sm_fwc_unexp);
  3159. bfa_sm_fault(rp->bfa, event);
  3160. }
  3161. }
  3162. /*
  3163. * Request queue is full, awaiting queue resume to send create request.
  3164. */
  3165. static void
  3166. bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
  3167. {
  3168. bfa_trc(rp->bfa, rp->rport_tag);
  3169. bfa_trc(rp->bfa, event);
  3170. switch (event) {
  3171. case BFA_RPORT_SM_QRESUME:
  3172. bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
  3173. bfa_rport_send_fwcreate(rp);
  3174. break;
  3175. case BFA_RPORT_SM_DELETE:
  3176. bfa_stats(rp, sm_fwc_del);
  3177. bfa_sm_set_state(rp, bfa_rport_sm_uninit);
  3178. bfa_reqq_wcancel(&rp->reqq_wait);
  3179. bfa_rport_free(rp);
  3180. break;
  3181. case BFA_RPORT_SM_OFFLINE:
  3182. bfa_stats(rp, sm_fwc_off);
  3183. bfa_sm_set_state(rp, bfa_rport_sm_offline);
  3184. bfa_reqq_wcancel(&rp->reqq_wait);
  3185. bfa_rport_offline_cb(rp);
  3186. break;
  3187. case BFA_RPORT_SM_HWFAIL:
  3188. bfa_stats(rp, sm_fwc_hwf);
  3189. bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
  3190. bfa_reqq_wcancel(&rp->reqq_wait);
  3191. break;
  3192. default:
  3193. bfa_stats(rp, sm_fwc_unexp);
  3194. bfa_sm_fault(rp->bfa, event);
  3195. }
  3196. }
/*
 * Online state - normal parking state.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* Tear down the f/w rport; queue-full gets a *_qfull state. */
		bfa_stats(rp, sm_on_off);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		/*
		 * QoS state-change notification: cache the new attributes
		 * and tell the driver which of flow-id/priority changed.
		 *
		 * NOTE(review): qos_attr is copied before the flow_id fields
		 * are byte-swapped below, so rp->qos_attr.qos_flow_id appears
		 * to keep the wire (big-endian) value -- confirm whether
		 * readers of rp->qos_attr expect that.
		 */
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		qos_scn->old_qos_attr.qos_flow_id =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Firmware rport is being deleted - awaiting f/w response.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Delete completed: rport is offline, notify the owner. */
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Upgrade the pending offline to a full delete. */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC died; treat as delete done and report offline. */
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Firmware delete could not be queued (request queue full); waiting for
 * queue resume to send it.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* Queue space available: retry the deferred delete. */
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Upgrade the pending offline to a full delete. */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC died; cancel the queue wait and report offline. */
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Offline state.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		/* No f/w state exists while offline; free immediately. */
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/* Re-create the f/w rport; queue-full gets *_qfull state. */
		bfa_stats(rp, sm_off_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Rport is deleted, waiting for firmware response to delete.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Firmware confirmed the delete; release the rport. */
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC died; no response will come, release anyway. */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Rport is deleted but the firmware delete request could not be queued
 * (request queue full); waiting for queue resume.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* Queue space available: send the deferred delete. */
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC died; cancel the queue wait and release the rport. */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Waiting for rport create response from firmware. A delete is pending.
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Create completed: immediately issue the pending delete. */
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC died; nothing left in f/w, release the rport. */
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Waiting for rport create response from firmware. Rport offline is pending.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
			     enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Create completed: immediately delete to go offline. */
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Upgrade the pending offline to a full delete. */
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * IOC h/w failed.
 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* Stay in iocdisable; just report offline to the owner. */
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* No f/w state survives an IOC failure; free directly. */
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/* IOC is back; re-create the f/w rport. */
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* Already disabled; ignore repeated failures. */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
  3471. /*
  3472. * bfa_rport_private BFA rport private functions
  3473. */
  3474. static void
  3475. __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
  3476. {
  3477. struct bfa_rport_s *rp = cbarg;
  3478. if (complete)
  3479. bfa_cb_rport_online(rp->rport_drv);
  3480. }
  3481. static void
  3482. __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
  3483. {
  3484. struct bfa_rport_s *rp = cbarg;
  3485. if (complete)
  3486. bfa_cb_rport_offline(rp->rport_drv);
  3487. }
  3488. static void
  3489. bfa_rport_qresume(void *cbarg)
  3490. {
  3491. struct bfa_rport_s *rp = cbarg;
  3492. bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
  3493. }
  3494. static void
  3495. bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
  3496. u32 *dm_len)
  3497. {
  3498. if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
  3499. cfg->fwcfg.num_rports = BFA_RPORT_MIN;
  3500. *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
  3501. }
/*
 * Attach-time initialization: carve the rport array out of the meminfo
 * block, put every rport (except tag 0) on the free queue, and advance
 * the meminfo KVA pointer past the consumed memory.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);

	rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* num_rports must be non-zero and a power of two. */
	bfa_assert(mod->num_rports &&
		   !(mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * Rport tag 0 is reserved and never handed out, so only
		 * tags 1..n-1 go on the free queue.
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory
	 */
	bfa_meminfo_kva(meminfo) = (u8 *) rp;
}
/* Module detach hook -- no per-module teardown is required. */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
/* Module start hook -- nothing to do for rports. */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
/* Module stop hook -- nothing to do for rports. */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
  3545. static void
  3546. bfa_rport_iocdisable(struct bfa_s *bfa)
  3547. {
  3548. struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
  3549. struct bfa_rport_s *rport;
  3550. struct list_head *qe, *qen;
  3551. list_for_each_safe(qe, qen, &mod->rp_active_q) {
  3552. rport = (struct bfa_rport_s *) qe;
  3553. bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
  3554. }
  3555. }
  3556. static struct bfa_rport_s *
  3557. bfa_rport_alloc(struct bfa_rport_mod_s *mod)
  3558. {
  3559. struct bfa_rport_s *rport;
  3560. bfa_q_deq(&mod->rp_free_q, &rport);
  3561. if (rport)
  3562. list_add_tail(&rport->qe, &mod->rp_active_q);
  3563. return rport;
  3564. }
/*
 * Return an rport to the free queue.  The rport must currently be on
 * the active queue (asserted).
 */
static void
bfa_rport_free(struct bfa_rport_s *rport)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);

	bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
	list_del(&rport->qe);
	list_add_tail(&rport->qe, &mod->rp_free_q);
}
/*
 * Build and queue a firmware rport-create request from rp->rport_info.
 * Returns BFA_TRUE if the message was queued, BFA_FALSE if the request
 * queue was full (a queue-resume wait is armed in that case, and the
 * caller transitions to a *_qfull state).
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
		    bfa_lpuid(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	/* max_frmsz goes to f/w in big-endian byte order. */
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_tag = rp->rport_info.lp_tag;
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
/*
 * Build and queue a firmware rport-delete request for rp->fw_handle.
 * Returns BFA_TRUE if queued; BFA_FALSE if the request queue was full
 * (queue-resume wait armed, caller goes to a *_qfull state).
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
		    bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
/*
 * Build and queue a firmware set-speed request.  Unlike create/delete,
 * a full request queue is NOT retried -- the speed update is silently
 * dropped (only traced), since the state machine stays in online.
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
		    bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
/*
 * bfa_rport_public
 */

/*
 * Rport interrupt processing: demultiplex firmware-to-host rport
 * messages to the owning rport's state machine.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		/* Latch the f/w handle and QoS attributes before FWRSP. */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		/* Stash the raw f/w message; the SM decodes it. */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
/*
 * bfa_rport_api
 */

/*
 * Allocate an rport from the pool and fire the CREATE event.
 * Returns NULL if the pool is exhausted.  rport_drv is the opaque
 * driver cookie passed back in the online/offline callbacks.
 */
struct bfa_rport_s *
bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
{
	struct bfa_rport_s *rp;

	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));

	if (rp == NULL)
		return NULL;

	rp->bfa = bfa;
	rp->rport_drv = rport_drv;
	memset(&rp->stats, 0, sizeof(rp->stats));

	/* A freshly allocated rport must still be in the uninit state. */
	bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);

	return rp;
}
  3698. void
  3699. bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
  3700. {
  3701. bfa_assert(rport_info->max_frmsz != 0);
  3702. /*
  3703. * Some JBODs are seen to be not setting PDU size correctly in PLOGI
  3704. * responses. Default to minimum size.
  3705. */
  3706. if (rport_info->max_frmsz == 0) {
  3707. bfa_trc(rport->bfa, rport->rport_tag);
  3708. rport_info->max_frmsz = FC_MIN_PDUSZ;
  3709. }
  3710. rport->rport_info = *rport_info;
  3711. bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
  3712. }
  3713. void
  3714. bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
  3715. {
  3716. bfa_assert(speed != 0);
  3717. bfa_assert(speed != BFA_PORT_SPEED_AUTO);
  3718. rport->rport_info.speed = speed;
  3719. bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
  3720. }
  3721. /*
  3722. * SGPG related functions
  3723. */
  3724. /*
  3725. * Compute and return memory needed by FCP(im) module.
  3726. */
  3727. static void
  3728. bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
  3729. u32 *dm_len)
  3730. {
  3731. if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
  3732. cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
  3733. *km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
  3734. *dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
  3735. }
/*
 * Attach-time initialization of the SGPG pool: align the DMA region,
 * carve the host-side (hsgpg) and firmware-side (sgpg) arrays out of
 * the meminfo block, pair each hsgpg with its DMA page, and queue all
 * pages on the free list.  Finally advance the meminfo pointers past
 * the consumed memory.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	int i;
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;

	/* Union lets the physical address be stored as a bfi_addr_u. */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->num_sgpgs = cfg->drvcfg.num_sgpgs;

	/*
	 * Round the DMA base up to the SGPG alignment boundary; the same
	 * offset is applied to the KVA and DMA-virtual pointers so all
	 * three views stay in step.
	 */
	mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
	align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
	mod->sgpg_arr_pa += align_len;
	mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
						align_len);
	mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
					       align_len);

	hsgpg = mod->hsgpg_arr;
	sgpg = mod->sgpg_arr;
	sgpg_pa.pa = mod->sgpg_arr_pa;
	mod->free_sgpgs = mod->num_sgpgs;

	/* Base physical address must be SGPG-size aligned. */
	bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));

	for (i = 0; i < mod->num_sgpgs; i++) {
		memset(hsgpg, 0, sizeof(*hsgpg));
		memset(sgpg, 0, sizeof(*sgpg));

		hsgpg->sgpg = sgpg;
		/* Store the DMA address in the byte order f/w expects. */
		sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
		hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
		list_add_tail(&hsgpg->qe, &mod->sgpg_q);

		hsgpg++;
		sgpg++;
		sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
	}

	bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
	bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
	bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
}
/* Module detach hook -- no per-module teardown is required. */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}
/* Module start hook -- nothing to do for SGPGs. */
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}
/* Module stop hook -- nothing to do for SGPGs. */
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}
/* IOC disable hook -- SGPG pool holds no f/w state to clean up. */
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
/*
 * Allocate nsgpgs scatter-gather pages from the pool onto sgpg_q.
 * All-or-nothing: returns BFA_STATUS_ENOMEM without taking any pages
 * if fewer than nsgpgs are free, BFA_STATUS_OK otherwise.
 */
bfa_status_t
bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	int i;

	bfa_trc_fp(bfa, nsgpgs);

	if (mod->free_sgpgs < nsgpgs)
		return BFA_STATUS_ENOMEM;

	for (i = 0; i < nsgpgs; i++) {
		bfa_q_deq(&mod->sgpg_q, &hsgpg);
		/* Free count says pages exist, so the dequeue must succeed. */
		bfa_assert(hsgpg);
		list_add_tail(&hsgpg->qe, sgpg_q);
	}

	mod->free_sgpgs -= nsgpgs;
	return BFA_STATUS_OK;
}
/*
 * Return nsgpg pages on sgpg_q to the pool, then hand out the freed
 * pages to waiters in FIFO order.  A waiter may be satisfied partially;
 * its callback fires only when its full request has been met.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	bfa_trc_fp(bfa, nsgpg);

	mod->free_sgpgs += nsgpg;
	bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);

	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		/* Give the head waiter everything it needs, or all we have. */
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			/* Request fully satisfied: unlink and notify. */
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
/*
 * Queue a wait element for nsgpg pages.  Must only be called when the
 * pool cannot satisfy the request (asserted); whatever pages are free
 * are granted to this waiter immediately, and the remainder is
 * delivered later via bfa_sgpg_mfree().
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	bfa_assert(nsgpg > 0);
	bfa_assert(nsgpg > mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG
		 */
		bfa_assert(list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
/*
 * Cancel a pending SGPG wait.  Any pages already granted to the waiter
 * (nsgpg_total - nsgpg) are returned to the pool.
 */
void
bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
	list_del(&wqe->qe);

	if (wqe->nsgpg_total != wqe->nsgpg)
		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
			       wqe->nsgpg_total - wqe->nsgpg);
}
  3872. void
  3873. bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
  3874. void *cbarg)
  3875. {
  3876. INIT_LIST_HEAD(&wqe->sgpg_q);
  3877. wqe->cbfn = cbfn;
  3878. wqe->cbarg = cbarg;
  3879. }
  3880. /*
  3881. * UF related functions
  3882. */
  3883. /*
  3884. *****************************************************************************
  3885. * Internal functions
  3886. *****************************************************************************
  3887. */
  3888. static void
  3889. __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
  3890. {
  3891. struct bfa_uf_s *uf = cbarg;
  3892. struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
  3893. if (complete)
  3894. ufm->ufrecv(ufm->cbarg, uf);
  3895. }
/*
 * Claim the DMA-able region for UF posted buffers: record its KVA and
 * physical base, zero it, and advance the meminfo DMA pointers past
 * the rounded-up total size.
 */
static void
claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u32 uf_pb_tot_sz;

	ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
	ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
	/* Total size rounded up to the DMA alignment boundary. */
	uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
				   BFA_DMA_ALIGN_SZ);
	bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
	bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;

	memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
}
  3908. static void
  3909. claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
  3910. {
  3911. struct bfi_uf_buf_post_s *uf_bp_msg;
  3912. struct bfi_sge_s *sge;
  3913. union bfi_addr_u sga_zero = { {0} };
  3914. u16 i;
  3915. u16 buf_len;
  3916. ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
  3917. uf_bp_msg = ufm->uf_buf_posts;
  3918. for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
  3919. i++, uf_bp_msg++) {
  3920. memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
  3921. uf_bp_msg->buf_tag = i;
  3922. buf_len = sizeof(struct bfa_uf_buf_s);
  3923. uf_bp_msg->buf_len = cpu_to_be16(buf_len);
  3924. bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
  3925. bfa_lpuid(ufm->bfa));
  3926. sge = uf_bp_msg->sge;
  3927. sge[0].sg_len = buf_len;
  3928. sge[0].flags = BFI_SGE_DATA_LAST;
  3929. bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
  3930. bfa_sge_to_be(sge);
  3931. sge[1].sg_len = buf_len;
  3932. sge[1].flags = BFI_SGE_PGDLEN;
  3933. sge[1].sga = sga_zero;
  3934. bfa_sge_to_be(&sge[1]);
  3935. }
  3936. /*
  3937. * advance pointer beyond consumed memory
  3938. */
  3939. bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
  3940. }
/*
 * Claim KVA for the UF descriptor array and initialize each UF with
 * its tag, buffer KVA/PA (paired with the buffers claimed earlier by
 * claim_uf_pbs), then queue all UFs on the free queue.
 */
static void
claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u16 i;
	struct bfa_uf_s *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		uf->pb_len = sizeof(struct bfa_uf_buf_s);
		uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/*
	 * advance memory pointer
	 */
	bfa_meminfo_kva(mi) = (u8 *) uf;
}
/*
 * Claim all UF module memory.  Order matters: the DMA buffers must be
 * claimed first because claim_ufs() and claim_uf_post_msgs() both read
 * ufm->uf_pbs_kva / ufm_pbs_pa set up by claim_uf_pbs().
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	claim_uf_pbs(ufm, mi);
	claim_ufs(ufm, mi);
	claim_uf_post_msgs(ufm, mi);
}
  3974. static void
  3975. bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
  3976. {
  3977. u32 num_ufs = cfg->fwcfg.num_uf_bufs;
  3978. /*
  3979. * dma-able memory for UF posted bufs
  3980. */
  3981. *dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
  3982. BFA_DMA_ALIGN_SZ);
  3983. /*
  3984. * kernel Virtual memory for UFs and UF buf post msg copies
  3985. */
  3986. *ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
  3987. *ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
  3988. }
/*
 * Attach-time initialization: zero the UF module state, record the
 * configured UF count, set up the free/posted queues, and claim all
 * module memory from the meminfo block.
 */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	      struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);

	uf_mem_claim(ufm, meminfo);
}
/* Module detach hook -- no per-module teardown is required. */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
  4005. static struct bfa_uf_s *
  4006. bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
  4007. {
  4008. struct bfa_uf_s *uf;
  4009. bfa_q_deq(&uf_mod->uf_free_q, &uf);
  4010. return uf;
  4011. }
/* Return a UF to the free queue. */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
/*
 * Post one UF receive buffer to firmware by copying its pre-built
 * buf-post message into the request queue.  On success the UF moves to
 * the posted queue; returns BFA_STATUS_FAILED when the request queue
 * is full (the caller keeps the UF).
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
	       sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);

	bfa_trc(ufm->bfa, uf->uf_tag);

	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
  4031. static void
  4032. bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
  4033. {
  4034. struct bfa_uf_s *uf;
  4035. while ((uf = bfa_uf_get(uf_mod)) != NULL) {
  4036. if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
  4037. break;
  4038. }
  4039. }
  4040. static void
  4041. uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
  4042. {
  4043. struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
  4044. u16 uf_tag = m->buf_tag;
  4045. struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
  4046. struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
  4047. u8 *buf = &uf_buf->d[0];
  4048. struct fchs_s *fchs;
  4049. m->frm_len = be16_to_cpu(m->frm_len);
  4050. m->xfr_len = be16_to_cpu(m->xfr_len);
  4051. fchs = (struct fchs_s *)uf_buf;
  4052. list_del(&uf->qe); /* dequeue from posted queue */
  4053. uf->data_ptr = buf;
  4054. uf->data_len = m->xfr_len;
  4055. bfa_assert(uf->data_len >= sizeof(struct fchs_s));
  4056. if (uf->data_len == sizeof(struct fchs_s)) {
  4057. bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
  4058. uf->data_len, (struct fchs_s *)buf);
  4059. } else {
  4060. u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
  4061. bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
  4062. BFA_PL_EID_RX, uf->data_len,
  4063. (struct fchs_s *)buf, pld_w0);
  4064. }
  4065. if (bfa->fcs)
  4066. __bfa_cb_uf_recv(uf, BFA_TRUE);
  4067. else
  4068. bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
  4069. }
/* Module stop hook -- nothing to do for UFs. */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
  4074. static void
  4075. bfa_uf_iocdisable(struct bfa_s *bfa)
  4076. {
  4077. struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
  4078. struct bfa_uf_s *uf;
  4079. struct list_head *qe, *qen;
  4080. list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
  4081. uf = (struct bfa_uf_s *) qe;
  4082. list_del(&uf->qe);
  4083. bfa_uf_put(ufm, uf);
  4084. }
  4085. }
/* Module start hook: post all free UF receive buffers to firmware. */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
/*
 * Register handler for all unsolicited receive frames.
 *
 * @param[in]	bfa	BFA instance
 * @param[in]	ufrecv	receive handler function
 * @param[in]	cbarg	receive handler arg
 */
void
bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->ufrecv = ufrecv;
	ufm->cbarg = cbarg;
}
  4105. /*
  4106. * Free an unsolicited frame back to BFA.
  4107. *
  4108. * @param[in] uf unsolicited frame to be freed
  4109. *
  4110. * @return None
  4111. */
  4112. void
  4113. bfa_uf_free(struct bfa_uf_s *uf)
  4114. {
  4115. bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
  4116. bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
  4117. }
/*
 * uf_pub BFA uf module public functions
 */

/*
 * UF interrupt processing: dispatch firmware-to-host UF messages.
 * Only frame-received notifications are expected; anything else is a
 * protocol error (traced and asserted).
 */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		bfa_trc(bfa, msg->mhdr.msg_id);
		bfa_assert(0);
	}
}