bfa_fcpim.c

/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCPIM);

/*
 * BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
static void bfa_ioim_lm_init(struct bfa_s *bfa);

#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))

#define bfa_fcpim_additn(__itnim) \
	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)

#define bfa_fcpim_delitn(__itnim) do { \
	WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
	bfa_itnim_update_del_itn_stats(__itnim); \
	list_del(&(__itnim)->qe); \
	WARN_ON(!list_empty(&(__itnim)->io_q)); \
	WARN_ON(!list_empty(&(__itnim)->io_cleanup_q)); \
	WARN_ON(!list_empty(&(__itnim)->pending_q)); \
} while (0)

#define bfa_itnim_online_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_online((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
			__bfa_cb_itnim_online, (__itnim)); \
	} \
} while (0)

#define bfa_itnim_offline_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_offline((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
			__bfa_cb_itnim_offline, (__itnim)); \
	} \
} while (0)
#define bfa_ioim_rp_wwn(__ioim) \
	(((struct bfa_fcs_rport_s *) \
	 (__ioim)->itnim->rport->rport_drv)->pwwn)

#define bfa_ioim_lp_wwn(__ioim) \
	((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa), \
	(__ioim)->itnim->rport->rport_info.lp_tag))->pwwn)
#define bfa_itnim_sler_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_sler((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
			__bfa_cb_itnim_sler, (__itnim)); \
	} \
} while (0)

enum bfa_ioim_lm_status {
	BFA_IOIM_LM_PRESENT = 1,
	BFA_IOIM_LM_LUN_NOT_SUP = 2,
	BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
	BFA_IOIM_LM_LUN_NOT_RDY = 4,
};

enum bfa_ioim_lm_ua_status {
	BFA_IOIM_LM_UA_RESET = 0,
	BFA_IOIM_LM_UA_SET = 1,
};

/*
 * itnim state machine event
 */
enum bfa_itnim_event {
	BFA_ITNIM_SM_CREATE = 1,	/* itnim is created */
	BFA_ITNIM_SM_ONLINE = 2,	/* itnim is online */
	BFA_ITNIM_SM_OFFLINE = 3,	/* itnim is offline */
	BFA_ITNIM_SM_FWRSP = 4,		/* firmware response */
	BFA_ITNIM_SM_DELETE = 5,	/* deleting an existing itnim */
	BFA_ITNIM_SM_CLEANUP = 6,	/* IO cleanup completion */
	BFA_ITNIM_SM_SLER = 7,		/* second level error recovery */
	BFA_ITNIM_SM_HWFAIL = 8,	/* IOC h/w failure event */
	BFA_ITNIM_SM_QRESUME = 9,	/* queue space available */
};
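/*
 * Editor's note: a minimal, self-contained sketch (not driver code) of
 * the state-machine convention these events feed. Throughout this file
 * the current state *is* a handler function stored in the object;
 * bfa_sm_set_state() swaps the handler and bfa_sm_send_event()
 * dispatches an event to it. All toy_* names below are hypothetical.
 */
#if 0	/* illustrative sketch only, never compiled */
struct toy_itnim;
typedef void (*toy_sm_t)(struct toy_itnim *itnim, enum bfa_itnim_event event);

struct toy_itnim {
	toy_sm_t sm;		/* current state == current handler */
};

#define toy_sm_set_state(_obj, _state)	((_obj)->sm = (_state))
#define toy_sm_send_event(_obj, _ev)	((_obj)->sm((_obj), (_ev)))

static void toy_sm_online(struct toy_itnim *itnim, enum bfa_itnim_event event);

static void
toy_sm_uninit(struct toy_itnim *itnim, enum bfa_itnim_event event)
{
	if (event == BFA_ITNIM_SM_CREATE)
		toy_sm_set_state(itnim, toy_sm_online);
}

static void
toy_sm_online(struct toy_itnim *itnim, enum bfa_itnim_event event)
{
	if (event == BFA_ITNIM_SM_OFFLINE)
		toy_sm_set_state(itnim, toy_sm_uninit);
}
#endif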
/*
 * BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do { \
	list_del(&(__ioim)->qe); \
	list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \
} while (0)

#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do { \
	if ((__fcpim)->profile_comp) \
		(__fcpim)->profile_comp(__ioim); \
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do { \
	if ((__fcpim)->profile_start) \
		(__fcpim)->profile_start(__ioim); \
} while (0)

/*
 * IO state machine events
 */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/* io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/* io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/* io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/* io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/* io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/* io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/* abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/* abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/* abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/* CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/* SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/* sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/* bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/* IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/* IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/* IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/* IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/* ITN offline TOV */
	BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */
	BFA_IOIM_SM_LM_RPL_DC = 20,	/* lunmask report-lun data changed */
	BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */
};
/*
 * BFA TSKIM related definitions
 */

/*
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
	bfa_tskim_notify_comp(__tskim); \
} while (0)

#define bfa_tskim_notify_comp(__tskim) do { \
	if ((__tskim)->notify) \
		bfa_itnim_tskdone((__tskim)->itnim); \
} while (0)

enum bfa_tskim_event {
	BFA_TSKIM_SM_START = 1,		/* TM command start */
	BFA_TSKIM_SM_DONE = 2,		/* TM completion */
	BFA_TSKIM_SM_QRESUME = 3,	/* resume after qfull */
	BFA_TSKIM_SM_HWFAIL = 5,	/* IOC h/w failure event */
	BFA_TSKIM_SM_HCB = 6,		/* BFA callback completion */
	BFA_TSKIM_SM_IOS_DONE = 7,	/* IO and sub TM completions */
	BFA_TSKIM_SM_CLEANUP = 8,	/* TM cleanup on ITN offline */
	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/* TM abort completion */
};
/*
 * forward declaration for BFA ITNIM functions
 */
static void	bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void	bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void	bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void	__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void	__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void	__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void	bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void	bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void	bfa_itnim_iotov(void *itnim_arg);
static void	bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void	bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void	bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);

/*
 * forward declaration of ITNIM state machine
 */
static void	bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void	bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void	bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void	bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void	bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void	bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void	bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void	bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void	bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void	bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void	bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void	bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void	bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void	bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void	bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);

/*
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t	bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t	bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
static bfa_boolean_t	bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void	bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void	__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void	__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void	__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void	__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void	__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t	bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
static void	__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
static void	__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
static void	__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);

/*
 * forward declaration of BFA IO state machine
 */
static void	bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void	bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void	bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void	bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void	bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void	bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void	bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void	bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void	bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void	bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void	bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void	bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);

/*
 * forward declaration for BFA TSKIM functions
 */
static void	__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void	__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
		struct scsi_lun lun);
static void	bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void	bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void	bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void	bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);

/*
 * forward declaration of BFA TSKIM state machine
 */
static void	bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void	bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void	bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void	bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void	bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void	bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void	bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);

/*
 * BFA FCP Initiator Mode module
 */

/*
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
	bfa_itnim_meminfo(cfg, km_len);

	/*
	 * IO memory
	 */
	*km_len += cfg->fwcfg.num_ioim_reqs *
	  (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

	/*
	 * task management command memory
	 */
	if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
		cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	*km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}
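/*
 * Editor's note: a sketch of the meminfo contract, using a hypothetical
 * caller (example_fcpim_kva_size is not driver code). Each *_meminfo()
 * routine only *adds* its requirement to *km_len, so a caller can
 * accumulate one total across modules and carve a single kva allocation.
 */
#if 0	/* illustrative sketch only, never compiled */
static u32
example_fcpim_kva_size(struct bfa_iocfc_cfg_s *cfg)
{
	u32 km_len = 0;

	bfa_fcpim_meminfo(cfg, &km_len);	/* itnim + ioim + tskim */
	return km_len;				/* bytes of kva to reserve */
}
#endif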
static void
bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
		struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcpim_s *fcpim = &fcp->fcpim;
	struct bfa_s *bfa = fcp->bfa;

	bfa_trc(bfa, cfg->drvcfg.path_tov);
	bfa_trc(bfa, cfg->fwcfg.num_rports);
	bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
	bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

	fcpim->fcp = fcp;
	fcpim->bfa = bfa;
	fcpim->num_itnims = cfg->fwcfg.num_rports;
	fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
	fcpim->path_tov = cfg->drvcfg.path_tov;
	fcpim->delay_comp = cfg->drvcfg.delay_comp;
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;

	bfa_itnim_attach(fcpim);
	bfa_tskim_attach(fcpim);
	bfa_ioim_attach(fcpim);
}
static void
bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
{
	struct bfa_fcpim_s *fcpim = &fcp->fcpim;
	struct bfa_itnim_s *itnim;
	struct list_head *qe, *qen;

	/* Enqueue unused tskim resources to free_q */
	list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);

	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_iocdisable(itnim);
	}
}
void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	fcpim->path_tov = path_tov * 1000;
	if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
		fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}

u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	return fcpim->path_tov / 1000;
}
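/*
 * Editor's note: path_tov is taken in seconds from callers but kept in
 * milliseconds internally, clamped to BFA_FCPIM_PATHTOV_MAX. A sketch
 * of the resulting round trip (example_path_tov is hypothetical):
 */
#if 0	/* illustrative sketch only, never compiled */
static void
example_path_tov(struct bfa_s *bfa)
{
	bfa_fcpim_path_tov_set(bfa, 30);	/* stored as 30000 ms */
	WARN_ON(bfa_fcpim_path_tov_get(bfa) != 30);
	/* a value above the cap reads back as BFA_FCPIM_PATHTOV_MAX / 1000 */
}
#endif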
#define bfa_fcpim_add_iostats(__l, __r, __stats) \
	(__l->__stats += __r->__stats)

void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
		struct bfa_itnim_iostats_s *rstats)
{
	bfa_fcpim_add_iostats(lstats, rstats, total_ios);
	bfa_fcpim_add_iostats(lstats, rstats, qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
	bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
	bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
	bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
	bfa_fcpim_add_iostats(lstats, rstats, onlines);
	bfa_fcpim_add_iostats(lstats, rstats, offlines);
	bfa_fcpim_add_iostats(lstats, rstats, creates);
	bfa_fcpim_add_iostats(lstats, rstats, deletes);
	bfa_fcpim_add_iostats(lstats, rstats, create_comps);
	bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
	bfa_fcpim_add_iostats(lstats, rstats, sler_events);
	bfa_fcpim_add_iostats(lstats, rstats, fw_create);
	bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
	bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
	bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
	bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_success);
	bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
	bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
	bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
	bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
	bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
	bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
	bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
	bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
	bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
}

bfa_status_t
bfa_fcpim_port_iostats(struct bfa_s *bfa,
		struct bfa_itnim_iostats_s *stats, u8 lp_tag)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;

	/* accumulate IO stats from itnim */
	memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		if (itnim->rport->rport_info.lp_tag != lp_tag)
			continue;
		bfa_fcpim_add_stats(stats, &(itnim->stats));
	}
	return BFA_STATUS_OK;
}

void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_latency_s *io_lat =
			&(ioim->itnim->ioprofile.io_latency);
	u32 val, idx;

	val = (u32)(jiffies - ioim->start_time);
	idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
	bfa_itnim_ioprofile_update(ioim->itnim, idx);

	io_lat->count[idx]++;
	io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
	io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
	io_lat->avg[idx] += val;
}
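/*
 * Editor's note: io_lat->avg[] above accumulates a running *sum* of
 * latencies, not a mean; the mean is only materialized when the profile
 * is read out. A sketch of that final step (the helper name is
 * hypothetical):
 */
#if 0	/* illustrative sketch only, never compiled */
static u32
example_mean_latency(struct bfa_itnim_latency_s *io_lat, u32 idx)
{
	if (io_lat->count[idx] == 0)
		return 0;
	return io_lat->avg[idx] / io_lat->count[idx];
}
#endif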
void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
	ioim->start_time = jiffies;
}

bfa_status_t
bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
{
	struct bfa_itnim_s *itnim;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct list_head *qe, *qen;

	/* accumulate IO stats from itnim */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_clear_stats(itnim);
	}
	fcpim->io_profile = BFA_TRUE;
	fcpim->io_profile_start_time = time;
	fcpim->profile_comp = bfa_ioim_profile_comp;
	fcpim->profile_start = bfa_ioim_profile_start;
	return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_profile_off(struct bfa_s *bfa)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	fcpim->io_profile = BFA_FALSE;
	fcpim->io_profile_start_time = 0;
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;
	return BFA_STATUS_OK;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	return fcpim->q_depth;
}

/*
 * BFA ITNIM module state machine functions
 */

/*
 * Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CREATE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
		itnim->is_online = BFA_FALSE;
		bfa_fcpim_additn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
		itnim->is_online = BFA_TRUE;
		bfa_itnim_iotov_online(itnim);
		bfa_itnim_online_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		bfa_itnim_send_fwcreate(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_sler_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
/*
 * Second level error recovery is needed.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_tskim_iocdisable(tskim);
	}

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
	/*
	 * For IO requests in the pending queue, we pretend an early timeout.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_tov(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}

/*
 * IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
	struct bfa_itnim_s *itnim = itnim_cbarg;

	bfa_stats(itnim, cleanup_comps);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}

/*
 * Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		/*
		 * Move the IO to a cleanup queue from the active queue so
		 * that a later TM will not pick up this IO.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	bfa_wc_wait(&itnim->wc);
}
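/*
 * Editor's note: bfa_wc_* above behaves like a wait counter: _init
 * registers a completion callback and takes a bias reference, _up is
 * called once per outstanding IO/TM, _down (via bfa_itnim_iodone and
 * bfa_itnim_tskdone below) releases one, and _wait drops the bias so
 * the callback fires once everything has drained. A self-contained
 * sketch of those semantics with hypothetical toy_wc_* names:
 */
#if 0	/* illustrative sketch only, never compiled */
struct toy_wc {
	int count;
	void (*resume)(void *cbarg);
	void *cbarg;
};

static void
toy_wc_init(struct toy_wc *wc, void (*resume)(void *cbarg), void *cbarg)
{
	wc->resume = resume;
	wc->cbarg = cbarg;
	wc->count = 1;			/* bias, dropped by toy_wc_wait() */
}

static void
toy_wc_down(struct toy_wc *wc)
{
	if (--wc->count == 0)
		wc->resume(wc->cbarg);	/* e.g. bfa_itnim_cleanp_comp() */
}

#define toy_wc_up(_wc)		((_wc)->count++)
#define toy_wc_wait(_wc)	toy_wc_down(_wc)
#endif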
static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_sler(itnim->ditn);
}

/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
	struct bfa_itnim_s *itnim = cbarg;

	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}

/*
 * bfa_itnim_public
 */

void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
	/*
	 * ITN memory
	 */
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}

void
bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
{
	struct bfa_s *bfa = fcpim->bfa;
	struct bfa_fcp_mod_s *fcp = fcpim->fcp;
	struct bfa_itnim_s *itnim;
	int i, j;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
		memset(itnim, 0, sizeof(struct bfa_itnim_s));
		itnim->bfa = bfa;
		itnim->fcpim = fcpim;
		itnim->reqq = BFA_REQQ_QOS_LO;
		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
		itnim->iotov_active = BFA_FALSE;
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
			itnim->ioprofile.io_latency.min[j] = ~0;
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	bfa_mem_kva_curp(fcp) = (u8 *) itnim;
}

void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}

static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itn_create_req_s *m;

	itnim->msg_no++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
			bfa_fn_lpu(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;
	bfa_stats(itnim, fw_create);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}
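/*
 * Editor's note: the "try, else wait" shape above is the request-queue
 * backpressure pattern used throughout this file. When bfa_reqq_next()
 * returns NULL the queue is full; the wait element registered with
 * bfa_reqq_wait() later fires bfa_itnim_qresume(), which posts
 * BFA_ITNIM_SM_QRESUME so the *_qfull state can retry the send. A
 * condensed sketch of the caller side (example_online_event is a
 * hypothetical name; the state functions are the real ones above):
 */
#if 0	/* illustrative sketch only, never compiled */
static void
example_online_event(struct bfa_itnim_s *itnim)
{
	if (bfa_itnim_send_fwcreate(itnim))
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
	else	/* parked on reqq_wait until BFA_ITNIM_SM_QRESUME */
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
}
#endif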
  1044. static bfa_boolean_t
  1045. bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
  1046. {
  1047. struct bfi_itn_delete_req_s *m;
  1048. /*
  1049. * check for room in queue to send request now
  1050. */
  1051. m = bfa_reqq_next(itnim->bfa, itnim->reqq);
  1052. if (!m) {
  1053. bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
  1054. return BFA_FALSE;
  1055. }
  1056. bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
  1057. bfa_fn_lpu(itnim->bfa));
  1058. m->fw_handle = itnim->rport->fw_handle;
  1059. bfa_stats(itnim, fw_delete);
  1060. /*
  1061. * queue I/O message to firmware
  1062. */
  1063. bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
  1064. return BFA_TRUE;
  1065. }
/*
 * Cleanup all pending failed inflight requests.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
		ioim = (struct bfa_ioim_s *)qe;
		bfa_ioim_delayed_comp(ioim, iotov);
	}
}

/*
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	bfa_itnim_iotov_stop(itnim);

	/*
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/*
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}

/*
 * Fail all pending IO requests
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/*
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/*
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}

/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
	struct bfa_itnim_s *itnim = itnim_arg;

	itnim->iotov_active = BFA_FALSE;

	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
}

/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
	if (itnim->fcpim->path_tov > 0) {
		itnim->iotov_active = BFA_TRUE;
		WARN_ON(!bfa_itnim_hold_io(itnim));
		bfa_timer_start(itnim->bfa, &itnim->timer,
			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
	}
}

/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
	if (itnim->iotov_active) {
		itnim->iotov_active = BFA_FALSE;
		bfa_timer_stop(&itnim->timer);
	}
}

/*
 * Itnim is being deleted: stop the IO TOV timer and fail back any
 * pending IO requests.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
	bfa_boolean_t pathtov_active = BFA_FALSE;

	if (itnim->iotov_active)
		pathtov_active = BFA_TRUE;

	bfa_itnim_iotov_stop(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov(itnim->ditn);
}

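/*
 * Fold a deleted itnim's IO completion stats into the fcpim-wide
 * deleted-itnim counters.
 */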
static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);

	fcpim->del_itn_stats.del_itn_iocomp_aborted +=
		itnim->stats.iocomp_aborted;
	fcpim->del_itn_stats.del_itn_iocomp_timedout +=
		itnim->stats.iocomp_timedout;
	fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
		itnim->stats.iocom_sqer_needed;
	fcpim->del_itn_stats.del_itn_iocom_res_free +=
		itnim->stats.iocom_res_free;
	fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
		itnim->stats.iocom_hostabrts;
	fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
	fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
	fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}

/*
 * bfa_itnim_public
 */

/*
 * Itnim interrupt processing.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	union bfi_itn_i2h_msg_u msg;
	struct bfa_itnim_s *itnim;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_ITN_I2H_CREATE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.create_rsp->bfa_handle);
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, create_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITN_I2H_DELETE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, delete_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITN_I2H_SLER_EVENT:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.sler_event->bfa_handle);
		bfa_stats(itnim, sler_events);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}

/*
 * bfa_itnim_api
 */

struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfa_itnim_s *itnim;

	bfa_itn_create(bfa, rport, bfa_itnim_isr);

	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
	WARN_ON(itnim->rport != rport);

	itnim->ditn = ditn;

	bfa_stats(itnim, creates);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

	return itnim;
}

void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, deletes);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}

void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
	itnim->seq_rec = seq_rec;
	bfa_stats(itnim, onlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}

void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, offlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}

/*
 * Return true if itnim is considered offline for holding off IO request.
 * IO is not held if itnim is being deleted.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
	return itnim->fcpim->path_tov && itnim->iotov_active &&
		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}

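/*
 * IO latency buckets are kept in timer-tick units; clock_res_mul and
 * clock_res_div let consumers convert them to milliseconds (1000 / HZ
 * milliseconds per tick).
 */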
#define bfa_io_lat_clock_res_div	HZ
#define bfa_io_lat_clock_res_mul	1000
bfa_status_t
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
			struct bfa_itnim_ioprofile_s *ioprofile)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);

	if (!fcpim->io_profile)
		return BFA_STATUS_IOPROFILE_OFF;

	itnim->ioprofile.index = BFA_IOBUCKET_MAX;
	itnim->ioprofile.io_profile_start_time =
		bfa_io_profile_start_time(itnim->bfa);
	itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
	itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
	*ioprofile = itnim->ioprofile;

	return BFA_STATUS_OK;
}

void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
	int j;

	memset(&itnim->stats, 0, sizeof(itnim->stats));
	memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
	for (j = 0; j < BFA_IOBUCKET_MAX; j++)
		itnim->ioprofile.io_latency.min[j] = ~0;
}

/*
 * BFA IO module state machine functions
 */

/*
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sgpg_alloc(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_abort, ioim);
		break;

	case BFA_IOIM_SM_LM_LUN_NOT_SUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_lm_lun_not_sup, ioim);
		break;

	case BFA_IOIM_SM_LM_RPL_DC:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_lm_rpl_dc, ioim);
		break;

	case BFA_IOIM_SM_LM_LUN_NOT_RDY:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_lm_lun_not_rdy, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			      __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_SQRETRY:
		if (bfa_ioim_maxretry_reached(ioim)) {
			/* max retry reached, free IO */
			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
			bfa_ioim_move_to_comp_q(ioim);
			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					__bfa_cb_ioim_failed, ioim);
			break;
		}
		/* waiting for IO tag resource free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is retried with new tag.
 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* abts and rrq done. Now retry the IO with new tag */
		bfa_ioim_update_iotag(ioim);
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_failed, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * In this state IO abort is done.
		 * Waiting for IO tag resource free.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is waiting for room in request CQ
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO bfa callback is pending. IO resource cannot be freed.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is completed, waiting resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * Called from bfa_fcpim_start once bfa_init() and the driver's flash read
 * have completed. Invalidate any stale LUN mask content (unit attention,
 * rp tag and lp tag).
 */
static void
bfa_ioim_lm_init(struct bfa_s *bfa)
{
	struct bfa_lun_mask_s *lunm_list;
	int i;

	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
		return;

	lunm_list = bfa_get_lun_mask_list(bfa);
	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
		lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
		lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
	}
}

/*
 * Validate LUN for LUN masking
 */
static enum bfa_ioim_lm_status
bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
		struct bfa_rport_s *rp, struct scsi_lun lun)
{
	u8 i;
	struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
	struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;

	if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
	    (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
		ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
		return BFA_IOIM_LM_PRESENT;
	}

	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {

		if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
			continue;

		if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
		     scsilun_to_int((struct scsi_lun *)&lun))
		    && (rp->rport_tag == lun_list[i].rp_tag)
		    && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
			lun_list[i].lp_tag)) {
			bfa_trc(ioim->bfa, lun_list[i].rp_tag);
			bfa_trc(ioim->bfa, lun_list[i].lp_tag);
			bfa_trc(ioim->bfa, scsilun_to_int(
				(struct scsi_lun *)&lun_list[i].lun));

			/*
			 * Deliver the pending unit attention once, but not
			 * on INQUIRY or REPORT LUNS commands.
			 */
			if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
			    ((cdb->scsi_cdb[0] != INQUIRY) &&
			     (cdb->scsi_cdb[0] != REPORT_LUNS))) {
				lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
				return BFA_IOIM_LM_RPL_DATA_CHANGED;
			}

			if (cdb->scsi_cdb[0] == REPORT_LUNS)
				ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;

			return BFA_IOIM_LM_PRESENT;
		}
	}

	if ((cdb->scsi_cdb[0] == INQUIRY) &&
	    (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
		ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
		return BFA_IOIM_LM_PRESENT;
	}

	if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
		return BFA_IOIM_LM_LUN_NOT_RDY;

	return BFA_IOIM_LM_LUN_NOT_SUP;
}

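/*
 * No-op response processor, installed when no LUN mask post-processing
 * of the response payload is required.
 */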
static bfa_boolean_t
bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
{
	return BFA_TRUE;
}

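/*
 * Walk one buffer of REPORT LUNS response data and mark every LUN that
 * also appears in the LUN mask DB as fetched.
 */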
static void
bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
		int buf_lun_cnt)
{
	struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
	struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
	struct scsi_lun lun;
	int i, j;

	bfa_trc(ioim->bfa, buf_lun_cnt);
	for (j = 0; j < buf_lun_cnt; j++) {
		lun = *((struct scsi_lun *)(lun_data + j));
		for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
			if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
				continue;
			if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
			    (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
			    (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
				== scsilun_to_int((struct scsi_lun *)&lun))) {
				lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
				break;
			}
		} /* next lun in mask DB */
	} /* next lun in buf */
}

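/*
 * Rewrite the REPORT LUNS scatter-gather data so that only LUNs present
 * in the mask DB are reported back; returns the number of LUNs kept.
 */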
static int
bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
		struct scsi_report_luns_data_s *rl)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
	struct scatterlist *sg = scsi_sglist(cmnd);
	struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
	struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
	int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
	int lun_across_sg_bytes, bytes_from_next_buf;
	u64 last_lun, temp_last_lun;

	/* fetch luns from the first sg element */
	bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
			(sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);

	/* fetch luns from multiple sg elements */
	scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
		if (sgeid == 0) {
			prev_sg_len = sg_dma_len(sg);
			prev_rl_data = (struct scsi_lun *)
					phys_to_virt(sg_dma_address(sg));
			continue;
		}

		/* if the buf is having more data */
		lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
		if (lun_across_sg_bytes) {
			bfa_trc(ioim->bfa, lun_across_sg_bytes);
			bfa_stats(ioim->itnim, lm_lun_across_sg);
			bytes_from_next_buf = sizeof(struct scsi_lun) -
						lun_across_sg_bytes;

			/* from next buf take higher bytes */
			temp_last_lun = *((u64 *)
					phys_to_virt(sg_dma_address(sg)));
			last_lun = temp_last_lun >>
				(lun_across_sg_bytes * BITS_PER_BYTE);

			/* from prev buf take higher bytes (byte offset) */
			temp_last_lun = *((u64 *)((u8 *)prev_rl_data +
					(prev_sg_len - lun_across_sg_bytes)));
			temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
			last_lun = last_lun | (temp_last_lun <<
				(bytes_from_next_buf * BITS_PER_BYTE));
			bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
		} else
			bytes_from_next_buf = 0;

		*pgdlen += sg_dma_len(sg);
		prev_sg_len = sg_dma_len(sg);
		prev_rl_data = (struct scsi_lun *)
				phys_to_virt(sg_dma_address(sg));
		bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
				bytes_from_next_buf,
				sg_dma_len(sg) / sizeof(struct scsi_lun));
	}

	/* update the report luns data - based on fetched luns */
	sg = scsi_sglist(cmnd);
	base_rl_data = (struct scsi_lun *)rl->lun;
	base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
	for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
		if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
			base_rl_data[j] = lun_list[i].lun;
			lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
			j++;
			lun_fetched_cnt++;
		}

		if (j > base_count) {
			j = 0;
			sg = sg_next(sg);
			base_rl_data = (struct scsi_lun *)
					phys_to_virt(sg_dma_address(sg));
			base_count = sg_dma_len(sg) /
					sizeof(struct scsi_lun);
		}
	}

	bfa_trc(ioim->bfa, lun_fetched_cnt);
	return lun_fetched_cnt;
}

static bfa_boolean_t
bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
{
	struct scsi_inquiry_data_s *inq;
	struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);

	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
	inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));

	bfa_trc(ioim->bfa, inq->device_type);
	inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
	return BFA_FALSE;
}

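/*
 * Post-process a REPORT LUNS response: filter the reported LUN list
 * through the mask DB and fix up the list length and wire residue.
 */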
static bfa_boolean_t
bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
	struct scatterlist *sg = scsi_sglist(cmnd);
	struct bfi_ioim_rsp_s *m;
	struct scsi_report_luns_data_s *rl = NULL;
	int lun_count = 0, lun_fetched_cnt = 0;
	u32 residue, pgdlen = 0;

	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;

	if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
		return BFA_TRUE;

	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
		return BFA_TRUE;

	pgdlen = sg_dma_len(sg);
	bfa_trc(ioim->bfa, pgdlen);
	rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
	lun_count = be32_to_cpu(rl->lun_list_length) / sizeof(struct scsi_lun);
	lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);

	if (lun_count == lun_fetched_cnt)
		return BFA_TRUE;

	bfa_trc(ioim->bfa, lun_count);
	bfa_trc(ioim->bfa, lun_fetched_cnt);
	bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));

	if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
		rl->lun_list_length = cpu_to_be32(lun_fetched_cnt *
					sizeof(struct scsi_lun));
	else
		bfa_stats(ioim->itnim, lm_small_buf_addresidue);

	bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
	bfa_trc(ioim->bfa, be32_to_cpu(m->residue));

	residue = be32_to_cpu(m->residue);
	residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
	bfa_stats(ioim->itnim, lm_wire_residue_changed);
	m->residue = cpu_to_be32(residue);
	bfa_trc(ioim->bfa, ioim->nsges);
	return BFA_FALSE;
}

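/*
 * bfa callback for a successfully completed IO: hand the IO back to the
 * mid-layer with good status.
 */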
static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
}

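/*
 * bfa callback for a completed IO with status: pass SCSI status, sense
 * data and residue back to the mid-layer.
 */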
static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8 *snsinfo = NULL;
	u8 sns_len = 0;
	s32 residue = 0;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/*
		 * setup sense information, if present
		 */
		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
					m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
						ioim->iotag);
		}

		/*
		 * setup residue value correctly for normal completions
		 */
		if (m->resid_flags == FCP_RESID_UNDER) {
			residue = be32_to_cpu(m->residue);
			bfa_stats(ioim->itnim, iocomp_underrun);
		}
		if (m->resid_flags == FCP_RESID_OVER) {
			residue = be32_to_cpu(m->residue);
			residue = -residue;
			bfa_stats(ioim->itnim, iocomp_overrun);
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			m->scsi_status, sns_len, snsinfo, residue);
}

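/*
 * LUN mask completion callbacks: each builds a CHECK CONDITION response
 * with sense data matching the masked-LUN case it represents.
 */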
static void
__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	int sns_len = 0xD;
	u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
	struct scsi_sense_s *snsinfo;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
					ioim->fcpim->fcp, ioim->iotag);
	snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
	snsinfo->add_sense_length = 0xa;
	snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
	snsinfo->sense_key = ILLEGAL_REQUEST;
	bfa_trc(ioim->bfa, residue);
	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
			SCSI_STATUS_CHECK_CONDITION, sns_len,
			(u8 *)snsinfo, residue);
}

static void
__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	int sns_len = 0xD;
	u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
	struct scsi_sense_s *snsinfo;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
					ioim->fcpim->fcp, ioim->iotag);
	snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
	snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
	snsinfo->asc = SCSI_ASC_TOCC;
	snsinfo->add_sense_length = 0x6;
	snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
	bfa_trc(ioim->bfa, residue);
	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
			SCSI_STATUS_CHECK_CONDITION, sns_len,
			(u8 *)snsinfo, residue);
}

static void
__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	int sns_len = 0xD;
	u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
	struct scsi_sense_s *snsinfo;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
					ioim->fcpim->fcp, ioim->iotag);
	snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
	snsinfo->add_sense_length = 0xa;
	snsinfo->sense_key = NOT_READY;
	snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
	snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
	bfa_trc(ioim->bfa, residue);
	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
			SCSI_STATUS_CHECK_CONDITION, sns_len,
			(u8 *)snsinfo, residue);
}

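/*
 * Refresh the rp/lp tags cached in every active LUN mask entry that
 * matches the given local and remote port WWNs.
 */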
void
bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
			u16 rp_tag, u8 lp_tag)
{
	struct bfa_lun_mask_s *lun_list;
	u8 i;

	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
		return;

	lun_list = bfa_get_lun_mask_list(bfa);
	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
			if ((lun_list[i].lp_wwn == lp_wwn) &&
			    (lun_list[i].rp_wwn == rp_wwn)) {
				lun_list[i].rp_tag = rp_tag;
				lun_list[i].lp_tag = lp_tag;
			}
		}
	}
}

/*
 * set UA for all active luns in LM DB
 */
static void
bfa_ioim_lm_set_ua(struct bfa_s *bfa)
{
	struct bfa_lun_mask_s *lunm_list;
	int i;

	lunm_list = bfa_get_lun_mask_list(bfa);
	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
			continue;
		lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
	}
}

bfa_status_t
bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
{
	struct bfa_lunmask_cfg_s *lun_mask;

	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
		return BFA_STATUS_FAILED;

	if (bfa_get_lun_mask_status(bfa) == update)
		return BFA_STATUS_NO_CHANGE;

	lun_mask = bfa_get_lun_mask(bfa);
	lun_mask->status = update;

	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
		bfa_ioim_lm_set_ua(bfa);

	return bfa_dconf_update(bfa);
}

bfa_status_t
bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
{
	int i;
	struct bfa_lun_mask_s *lunm_list;

	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
		return BFA_STATUS_FAILED;

	lunm_list = bfa_get_lun_mask_list(bfa);
	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
			if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
				bfa_rport_unset_lunmask(bfa,
				  BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
		}
	}

	memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
	return bfa_dconf_update(bfa);
}

bfa_status_t
bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
{
	struct bfa_lunmask_cfg_s *lun_mask;

	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
		return BFA_STATUS_FAILED;

	lun_mask = bfa_get_lun_mask(bfa);
	memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
	return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
		      wwn_t rpwwn, struct scsi_lun lun)
{
	struct bfa_lun_mask_s *lunm_list;
	struct bfa_rport_s *rp = NULL;
	int i, free_index = MAX_LUN_MASK_CFG + 1;
	struct bfa_fcs_lport_s *port = NULL;
	struct bfa_fcs_rport_s *rp_fcs;

	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
		return BFA_STATUS_FAILED;

	port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
				   vf_id, *pwwn);
	if (port) {
		*pwwn = port->port_cfg.pwwn;
		rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
		if (rp_fcs)
			rp = rp_fcs->bfa_rport;
	}

	lunm_list = bfa_get_lun_mask_list(bfa);
	/* if entry exists */
	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
			free_index = i;
		if ((lunm_list[i].lp_wwn == *pwwn) &&
		    (lunm_list[i].rp_wwn == rpwwn) &&
		    (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
		     scsilun_to_int((struct scsi_lun *)&lun)))
			return BFA_STATUS_ENTRY_EXISTS;
	}

	if (free_index > MAX_LUN_MASK_CFG)
		return BFA_STATUS_MAX_ENTRY_REACHED;

	if (rp) {
		lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
						   rp->rport_info.local_pid);
		lunm_list[free_index].rp_tag = rp->rport_tag;
	} else {
		lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
		lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
	}

	lunm_list[free_index].lp_wwn = *pwwn;
	lunm_list[free_index].rp_wwn = rpwwn;
	lunm_list[free_index].lun = lun;
	lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;

	/* set for all luns in this rp */
	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if ((lunm_list[i].lp_wwn == *pwwn) &&
		    (lunm_list[i].rp_wwn == rpwwn))
			lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
	}

	return bfa_dconf_update(bfa);
}

bfa_status_t
bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
			 wwn_t rpwwn, struct scsi_lun lun)
{
	struct bfa_lun_mask_s *lunm_list;
	struct bfa_rport_s *rp = NULL;
	struct bfa_fcs_lport_s *port = NULL;
	struct bfa_fcs_rport_s *rp_fcs;
	int i;

	/* in min cfg lunm_list could be NULL but no commands should run. */
	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
		return BFA_STATUS_FAILED;

	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
	bfa_trc(bfa, *pwwn);
	bfa_trc(bfa, rpwwn);
	bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));

	if (*pwwn == 0) {
		port = bfa_fcs_lookup_port(
				&((struct bfad_s *)bfa->bfad)->bfa_fcs,
				vf_id, *pwwn);
		if (port) {
			*pwwn = port->port_cfg.pwwn;
			rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
			if (rp_fcs)
				rp = rp_fcs->bfa_rport;
		}
	}

	lunm_list = bfa_get_lun_mask_list(bfa);
	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if ((lunm_list[i].lp_wwn == *pwwn) &&
		    (lunm_list[i].rp_wwn == rpwwn) &&
		    (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
		     scsilun_to_int((struct scsi_lun *)&lun))) {
			lunm_list[i].lp_wwn = 0;
			lunm_list[i].rp_wwn = 0;
			int_to_scsilun(0, &lunm_list[i].lun);
			lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
			if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
				lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
				lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
			}
			return bfa_dconf_update(bfa);
		}
	}

	/* set for all luns in this rp */
	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if ((lunm_list[i].lp_wwn == *pwwn) &&
		    (lunm_list[i].rp_wwn == rpwwn))
			lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
	}

	return BFA_STATUS_ENTRY_NOT_EXISTS;
}

static void
__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
			0, 0, NULL, 0);
}

static void
__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_stats(ioim->itnim, path_tov_expired);
	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
			0, 0, NULL, 0);
}

static void
__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}

static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}

/*
 * Send I/O request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
	struct bfi_sge_s *sge, *sgpge;
	u32 pgdlen = 0;
	u32 fcp_dl;
	u64 addr;
	struct scatterlist *sg;
	struct bfa_sgpg_s *sgpg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
	u32 i, sge_id, pgcumsz;
	enum dma_data_direction dmadir;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_stats(ioim->itnim, qwait);
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				&ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/*
	 * build i/o request message next
	 */
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = 0;

	sge = &m->sges[0];
	sgpg = ioim->sgpg;
	sge_id = 0;
	sgpge = NULL;
	pgcumsz = 0;
	scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
		if (i == 0) {
			/* build inline IO SG element */
			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			pgdlen = sg_dma_len(sg);
			sge->sg_len = pgdlen;
			sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
			bfa_sge_to_be(sge);
			sge++;
		} else {
			if (sge_id == 0)
				sgpge = sgpg->sgpg->sges;

			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sgpge->sga = *(union bfi_addr_u *) &addr;
			sgpge->sg_len = sg_dma_len(sg);
			pgcumsz += sgpge->sg_len;

			/* set flags */
			if (i < (ioim->nsges - 1) &&
					sge_id < (BFI_SGPG_DATA_SGES - 1))
				sgpge->flags = BFI_SGE_DATA;
			else if (i < (ioim->nsges - 1))
				sgpge->flags = BFI_SGE_DATA_CPL;
			else
				sgpge->flags = BFI_SGE_DATA_LAST;

			bfa_sge_to_le(sgpge);

			sgpge++;
			if (i == (ioim->nsges - 1)) {
				sgpge->flags = BFI_SGE_PGDLEN;
				sgpge->sga.a32.addr_lo = 0;
				sgpge->sga.a32.addr_hi = 0;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
			} else if (++sge_id == BFI_SGPG_DATA_SGES) {
				sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
				sgpge->flags = BFI_SGE_LINK;
				sgpge->sga = sgpg->sgpg_pa;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
				sge_id = 0;
				pgcumsz = 0;
			}
		}
	}

	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/*
	 * set up I/O command parameters
	 */
	m->cmnd = cmnd_z0;
	int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
	dmadir = cmnd->sc_data_direction;
	if (dmadir == DMA_TO_DEVICE)
		m->cmnd.iodir = FCP_IODIR_WRITE;
	else if (dmadir == DMA_FROM_DEVICE)
		m->cmnd.iodir = FCP_IODIR_READ;
	else
		m->cmnd.iodir = FCP_IODIR_NONE;

	m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
	fcp_dl = scsi_bufflen(cmnd);
	m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);

	/*
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		ioim->itnim->stats.rd_throughput += fcp_dl;
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		ioim->itnim->stats.wr_throughput += fcp_dl;
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fall through */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
	}
	if (itnim->seq_rec ||
	    (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
	return BFA_TRUE;
}

/*
 * Setup any additional SG pages needed. Inline SG element is setup
 * at queuing time.
 */
static bfa_boolean_t
bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
{
	u16 nsgpgs;

	WARN_ON(ioim->nsges <= BFI_SGE_INLINE);

	/*
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);

	return BFA_TRUE;
}

/*
 * Send I/O abort request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i msgop;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->abort_tag = ++ioim->abort_tag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
	return BFA_TRUE;
}

/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_ioim_qresume(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_stats(ioim->itnim, qresumes);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
}

static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/*
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

	if (!ioim->iosp->tskim) {
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		bfa_wc_down(&ioim->iosp->tskim->wc);
}

static bfa_boolean_t
bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
{
	if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
	     (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
		return BFA_FALSE;

	return BFA_TRUE;
}

void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
	/*
	 * If path tov timer expired, failback with PATHTOV status - these
	 * IO requests are not normally retried by IO stack.
	 *
	 * Otherwise the device came back online; fail it with normal failed
	 * status so that the IO stack retries these failed IO requests.
	 */
	if (iotov)
		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
	else {
		ioim->io_cbfn = __bfa_cb_ioim_failed;
		bfa_stats(ioim->itnim, iocom_nexus_abort);
	}
	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);

	/*
	 * Move IO to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
}

/*
 * Memory allocation and initialization.
 */
void
bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_fcp_mod_s *fcp = fcpim->fcp;
	struct bfa_ioim_sp_s *iosp;
	u16 i;

	/*
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
	fcpim->ioim_arr = ioim;
	bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
	fcpim->ioim_sp_arr = iosp;
	bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);

	/*
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->fcp->num_ioim_reqs;
	     i++, ioim++, iosp++) {
		/*
		 * initialize IOIM
		 */
		memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag = i;
		ioim->bfa = fcpim->bfa;
		ioim->fcpim = fcpim;
		ioim->iosp = iosp;
		ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
				bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
				bfa_ioim_sgpg_alloced, ioim);

		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
	}
}
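
/*
 * ISR for IOIM completion responses from firmware; maps the firmware
 * completion status to a state machine event for the IO.
 */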
void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = be16_to_cpu(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	WARN_ON(ioim->iotag != iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		ioim->iosp->comp_rspmsg = *m;

	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_stats(ioim->itnim, iocomp_ok);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		ioim->proc_rsp_data(ioim);
		break;
	case BFI_IOIM_STS_TIMEDOUT:
		bfa_stats(ioim->itnim, iocomp_timedout);
		/* fall through - timed-out IOs are completed as aborted */
	case BFI_IOIM_STS_ABORTED:
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_stats(ioim->itnim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;
	case BFI_IOIM_STS_PROTO_ERR:
		bfa_stats(ioim->itnim, iocom_proto_err);
		WARN_ON(!rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		bfa_stats(ioim->itnim, iocom_sqer_needed);
		WARN_ON(rsp->reuse_io_tag != 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_stats(ioim->itnim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_stats(ioim->itnim, iocom_hostabrts);
		if (rsp->abort_tag != ioim->abort_tag) {
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		bfa_stats(ioim->itnim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		WARN_ON(1);
	}

	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
	bfa_sm_send_event(ioim, evt);
}
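
/*
 * ISR for good (firmware-reported success) IO completions. When LUN
 * masking is enabled, the response data is processed before the
 * completion event is chosen.
 */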
void
bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;

	iotag = be16_to_cpu(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);

	bfa_ioim_cb_profile_comp(fcpim, ioim);

	if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
		return;
	}

	if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
		bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
	else
		bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
}
/*
 * Called by itnim to clean up IO while going offline.
 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_cleanups);

	ioim->iosp->tskim = NULL;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
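
/*
 * Clean up an IO on behalf of a task management command; the owning
 * tskim is notified through its wait counter when cleanup completes.
 */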
void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
/*
 * IOC failure handling.
 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_iocdowns);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}

/*
 * IO offline TOV popped. Fail the pending IO.
 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}
/*
 * Allocate IOIM resource for initiator mode I/O request.
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
		struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfa_ioim_s *ioim;
	struct bfa_iotag_s *iotag = NULL;

	/*
	 * allocate IOIM resource
	 */
	bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
	if (!iotag) {
		bfa_stats(itnim, no_iotags);
		return NULL;
	}

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(itnim, total_ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);

	return ioim;
}
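
/*
 * Free an IOIM and return its IO tag to the appropriate free queue.
 */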
void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_s *fcpim = ioim->fcpim;
	struct bfa_iotag_s *iotag;

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	ioim->iotag &= BFA_IOIM_IOTAG_MASK;

	WARN_ON(!(ioim->iotag <
		 (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
	iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);

	if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
		list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
	else
		list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);

	list_del(&ioim->qe);
}
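
/*
 * Start an I/O request. When LUN masking is enabled, the request is
 * first validated against the LUN mask before being dispatched.
 */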
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
	struct bfa_lps_s *lps;
	enum bfa_ioim_lm_status status;
	struct scsi_lun scsilun;

	if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
		lps = BFA_IOIM_TO_LPS(ioim);
		int_to_scsilun(cmnd->device->lun, &scsilun);
		status = bfa_ioim_lm_check(ioim, lps,
				ioim->itnim->rport, scsilun);
		if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
			bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
			bfa_stats(ioim->itnim, lm_lun_not_rdy);
			return;
		}

		if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
			bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
			bfa_stats(ioim->itnim, lm_lun_not_sup);
			return;
		}

		if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
			bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
			bfa_stats(ioim->itnim, lm_rpl_data_changed);
			return;
		}
	}

	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);

	/*
	 * Obtain the queue over which this request has to be issued
	 */
	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
			BFA_FALSE : bfa_itnim_get_reqq(ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}
/*
 * Driver I/O abort request.
 */
bfa_status_t
bfa_ioim_abort(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);

	if (!bfa_ioim_is_abortable(ioim))
		return BFA_STATUS_FAILED;

	bfa_stats(ioim->itnim, io_aborts);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);

	return BFA_STATUS_OK;
}
/*
 * BFA TSKIM state machine functions
 */

/*
 * Task management command beginning state.
 */
static void
bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_START:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_gather_ios(tskim);

		/*
		 * If device is offline, do not send TM on wire. Just cleanup
		 * any pending IO requests and complete TM request.
		 */
		if (!bfa_itnim_is_online(tskim->itnim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
			tskim->tsk_status = BFI_TSKIM_STS_OK;
			bfa_tskim_cleanup_ios(tskim);
			return;
		}

		if (!bfa_tskim_send(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				&tskim->reqq_wait);
		}
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
/*
 * TM command is active, awaiting completion from firmware to
 * cleanup IO requests in TM scope.
 */
static void
bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		if (!bfa_tskim_send_abort(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				&tskim->reqq_wait);
		}
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
/*
 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
 * completion event from firmware.
 */
static void
bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/*
		 * Ignore and wait for ABORT completion from firmware.
		 */
		break;

	case BFA_TSKIM_SM_CLEANUP_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
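
/*
 * IO requests in TM scope are being cleaned up; awaiting completion of
 * all gathered IOs before posting the TM completion to the driver.
 */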
static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_IOS_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * Ignore, TM command completed on wire.
		 * Notify TM completion on IO cleanup completion.
		 */
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
/*
 * Task management command is waiting for room in request CQ
 */
static void
bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_send(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * No need to send TM on wire since ITN is offline.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
/*
 * Task management command is active, awaiting room in the request CQ
 * to send a cleanup request.
 */
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_reqq_wcancel(&tskim->reqq_wait);
		/*
		 * Fall through !!!
		 */
	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		bfa_tskim_send_abort(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
/*
 * BFA callback is pending
 */
static void
bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_HCB:
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
		bfa_tskim_free(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		bfa_tskim_notify_comp(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
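
/*
 * Completion callbacks posted to the driver: report TM success or
 * failure status once the callback actually runs.
 */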
static void
__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_tskim_s *tskim = cbarg;

	if (!complete) {
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
		return;
	}

	bfa_stats(tskim->itnim, tm_success);
	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
}

static void
__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_tskim_s *tskim = cbarg;

	if (!complete) {
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
		return;
	}

	bfa_stats(tskim->itnim, tm_failures);
	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
			BFI_TSKIM_STS_FAILED);
}
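
/*
 * Check whether a given LUN falls within the scope of this TM command:
 * a target reset covers all LUNs, the other commands only their own LUN.
 */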
static bfa_boolean_t
bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
{
	switch (tskim->tm_cmnd) {
	case FCP_TM_TARGET_RESET:
		return BFA_TRUE;

	case FCP_TM_ABORT_TASK_SET:
	case FCP_TM_CLEAR_TASK_SET:
	case FCP_TM_LUN_RESET:
	case FCP_TM_CLEAR_ACA:
		return !memcmp(&tskim->lun, &lun, sizeof(lun));

	default:
		WARN_ON(1);
	}

	return BFA_FALSE;
}
/*
 * Gather affected IO requests and task management commands.
 */
static void
bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;
	struct scsi_cmnd *cmnd;
	struct scsi_lun scsilun;

	INIT_LIST_HEAD(&tskim->io_q);

	/*
	 * Gather any active IO requests first.
	 */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &tskim->io_q);
		}
	}

	/*
	 * Failback any pending IO requests immediately.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
			bfa_ioim_tov(ioim);
		}
	}
}
/*
 * IO cleanup completion
 */
static void
bfa_tskim_cleanp_comp(void *tskim_cbarg)
{
	struct bfa_tskim_s *tskim = tskim_cbarg;

	bfa_stats(tskim->itnim, tm_io_comps);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
}
/*
 * Clean up all IO requests gathered under this task management command.
 */
static void
bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_wc_up(&tskim->wc);
		bfa_ioim_cleanup_tm(ioim, tskim);
	}

	bfa_wc_wait(&tskim->wc);
}
/*
 * Send task management request to firmware.
 */
static bfa_boolean_t
bfa_tskim_send(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
			bfa_fn_lpu(tskim->bfa));

	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
	m->itn_fhdl = tskim->itnim->rport->fw_handle;
	m->t_secs = tskim->tsecs;
	m->lun = tskim->lun;
	m->tm_flags = tskim->tm_cmnd;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}
/*
 * Send abort request to cleanup an active TM to firmware.
 */
static bfa_boolean_t
bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_abortreq_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
			bfa_fn_lpu(tskim->bfa));

	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}
/*
 * Call to resume task management cmnd waiting for room in request queue.
 */
static void
bfa_tskim_qresume(void *cbarg)
{
	struct bfa_tskim_s *tskim = cbarg;

	bfa_stats(tskim->itnim, tm_qresumes);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}
/*
 * Cleanup IOs associated with a task management command on IOC failures.
 */
static void
bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}
/*
 * Notification on completions from related ioim.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	bfa_wc_down(&tskim->wc);
}

/*
 * Handle IOC h/w failure notification from itnim.
 */
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_FALSE;
	bfa_stats(tskim->itnim, tm_iocdowns);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}

/*
 * Cleanup TM command and associated IOs as part of ITNIM offline.
 */
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_TRUE;
	bfa_stats(tskim->itnim, tm_cleanups);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}
/*
 * Memory allocation and initialization.
 */
void
bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_fcp_mod_s *fcp = fcpim->fcp;
	u16 i;

	INIT_LIST_HEAD(&fcpim->tskim_free_q);
	INIT_LIST_HEAD(&fcpim->tskim_unused_q);

	tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
	fcpim->tskim_arr = tskim;

	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
		/*
		 * initialize TSKIM
		 */
		memset(tskim, 0, sizeof(struct bfa_tskim_s));
		tskim->tsk_tag = i;
		tskim->bfa = fcpim->bfa;
		tskim->fcpim = fcpim;
		tskim->notify = BFA_FALSE;
		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
				tskim);
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);

		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
	}

	bfa_mem_kva_curp(fcp) = (u8 *) tskim;
}
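
/*
 * ISR for TM completion responses from firmware.
 */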
void
bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
	struct bfa_tskim_s *tskim;
	u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);

	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
	WARN_ON(tskim->tsk_tag != tsk_tag);

	tskim->tsk_status = rsp->tsk_status;

	/*
	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
	 * requests. All other statuses are for normal completions.
	 */
	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
		bfa_stats(tskim->itnim, tm_cleanup_comps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
	} else {
		bfa_stats(tskim->itnim, tm_fw_rsps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
	}
}
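
/*
 * Allocate a TSKIM from the free queue; returns NULL if none is available.
 */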
struct bfa_tskim_s *
bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfa_tskim_s *tskim;

	bfa_q_deq(&fcpim->tskim_free_q, &tskim);

	if (tskim)
		tskim->dtsk = dtsk;

	return tskim;
}

void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}
/*
 * Start a task management command.
 *
 * @param[in]	tskim	BFA task management command instance
 * @param[in]	itnim	i-t nexus for the task management command
 * @param[in]	lun	lun, if applicable
 * @param[in]	tm_cmnd	Task management command code.
 * @param[in]	tsecs	Timeout in seconds
 *
 * @return None.
 */
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
		struct scsi_lun lun,
		enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
	tskim->itnim = itnim;
	tskim->lun = lun;
	tskim->tm_cmnd = tm_cmnd;
	tskim->tsecs = tsecs;
	tskim->notify = BFA_FALSE;
	bfa_stats(itnim, tm_cmnds);

	list_add_tail(&tskim->qe, &itnim->tsk_q);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}
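
/*
 * Park TSKIMs beyond what the firmware supports (num_tskim_fw) on the
 * unused queue.
 */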
void
bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct list_head *qe;
	int i;

	for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
		bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
		list_add_tail(qe, &fcpim->tskim_unused_q);
	}
}
/* BFA FCP module - parent module for fcpim */

BFA_MODULE(fcp);

static void
bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16 nsegs, idx, per_seg_ios, num_io_req;
	u32 km_len = 0;

	/*
	 * ZERO is an allowed config value for num_ioim_reqs and
	 * num_fwtio_reqs. If the values are non-zero, adjust them to stay
	 * within the supported limits.
	 */
	if (cfg->fwcfg.num_ioim_reqs &&
	    cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;

	if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
		cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;

	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	if (num_io_req > BFA_IO_MAX) {
		if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
			cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
			cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
		} else if (cfg->fwcfg.num_fwtio_reqs)
			cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
		else
			cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
	}

	bfa_fcpim_meminfo(cfg, &km_len);

	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	km_len += num_io_req * sizeof(struct bfa_iotag_s);
	km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);

	/* dma memory */
	nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
	per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);

	bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
		if (num_io_req >= per_seg_ios) {
			num_io_req -= per_seg_ios;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_ios * BFI_IOIM_SNSLEN);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_io_req * BFI_IOIM_SNSLEN);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, fcp_kva, km_len);
}
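
/*
 * Claim the DMA segments for the snsbase pool (one BFI_IOIM_SNSLEN-sized
 * area per IO request, passed to firmware), then attach the fcpim and
 * iotag sub-modules and carve out the itn array.
 */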
static void
bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16 idx, nsegs, num_io_req;

	fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
	fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs;
	fcp->num_itns = cfg->fwcfg.num_rports;
	fcp->bfa = bfa;

	/*
	 * Setup the pool of snsbase addr's, that is passed to fw as
	 * part of bfi_iocfc_cfg_s.
	 */
	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);

	bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {

		if (!bfa_mem_dma_virt(seg_ptr))
			break;

		fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
		fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
		bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
	}

	bfa_fcpim_attach(fcp, bfad, cfg, pcidev);

	bfa_iotag_attach(fcp);

	fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
	bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
			(fcp->num_itns * sizeof(struct bfa_itn_s));
	memset(fcp->itn_arr, 0,
			(fcp->num_itns * sizeof(struct bfa_itn_s)));
}
static void
bfa_fcp_detach(struct bfa_s *bfa)
{
}

static void
bfa_fcp_start(struct bfa_s *bfa)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);

	/*
	 * bfa_init() with flash read is complete. now invalidate the stale
	 * content of lun mask like unit attention, rp tag and lp tag.
	 */
	bfa_ioim_lm_init(fcp->bfa);
}

static void
bfa_fcp_stop(struct bfa_s *bfa)
{
}
static void
bfa_fcp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);

	/* Enqueue unused ioim resources to free_q */
	list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q);

	bfa_fcpim_iocdisable(fcp);
}
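
/*
 * Park IO tags beyond what the firmware supports (num_ioim_fw) on the
 * unused queue.
 */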
void
bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw)
{
	struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa);
	struct list_head *qe;
	int i;

	for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
		bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
		list_add_tail(qe, &mod->iotag_unused_q);
	}
}
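
/*
 * Register the given ISR for ITN messages addressed to this rport's tag.
 */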
void
bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
		void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_itn_s *itn;

	itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
	itn->isr = isr;
}
/*
 * Itn interrupt processing.
 */
void
bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	union bfi_itn_i2h_msg_u msg;
	struct bfa_itn_s *itn;

	msg.msg = m;
	itn = BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);

	if (itn->isr)
		itn->isr(bfa, m);
	else
		WARN_ON(1);
}
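
/*
 * Carve out the IO tag pool: the first num_ioim_reqs tags go to the
 * initiator IO free queue, the remaining num_fwtio_reqs tags to the
 * fwtio free queue.
 */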
void
bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
{
	struct bfa_iotag_s *iotag;
	u16 num_io_req, i;

	iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
	fcp->iotag_arr = iotag;

	INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
	INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
	INIT_LIST_HEAD(&fcp->iotag_unused_q);

	num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
	for (i = 0; i < num_io_req; i++, iotag++) {
		memset(iotag, 0, sizeof(struct bfa_iotag_s));
		iotag->tag = i;
		if (i < fcp->num_ioim_reqs)
			list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
		else
			list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
	}

	bfa_mem_kva_curp(fcp) = (u8 *) iotag;
}