/*
 * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 * based on qla2x00t.c code:
 *
 * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 * Copyright (C) 2004 - 2005 Leonid Stoljar
 * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 * Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 * Forward port and refactoring to modern qla2xxx and target/configfs
 *
 * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE		0	/* simple task attribute */
#define FCP_PTA_HEADQ		1	/* head of queue task attribute */
#define FCP_PTA_ORDERED		2	/* ordered task attribute */
#define FCP_PTA_ACA		4	/* auto contingent allegiance */
#define FCP_PTA_MASK		7	/* mask for task attribute field */
#define FCP_PRI_SHIFT		3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK	0x80	/* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time when
 * those functions are called:
 *
 * - Either the context is IRQ and only the IRQ handler can modify HW data,
 *   including rings related fields,
 *
 * - Or access to target mode variables from struct qla_tgt doesn't
 *   cross those functions' boundaries, except tgt_stop, which is
 *   additionally protected by irq_cmd_count.
 */

/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
	struct qla_tgt_srr_imm *imm, int ha_lock);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_cmd_cachep;
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
	struct qla_tgt *tgt,
	const uint8_t *port_name)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
		if (!memcmp(sess->port_name, port_name, WWN_SIZE))
			return sess;
	}

	return NULL;
}

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}
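
/*
 * Map the FC destination address (d_id) of an incoming ATIO to the
 * scsi_qla_host (base port or NPIV vport) that owns it, using the per-al_pa
 * tgt_vp_map table. Returns NULL if no matching host is registered.
 */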
static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct qla_hw_data *ha = vha->hw;
	uint8_t vp_idx;

	if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
		return NULL;

	if (vha->d_id.b.al_pa == d_id[2])
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
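
/*
 * Entry point for packets pulled off the ATIO ring: route each entry to the
 * vha that owns it (by d_id for ATIO_TYPE7, by vp_index for immediate
 * notifies) before handing it to qlt_24xx_atio_pkt().
 */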
void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}
	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;
		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}
	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return;
}
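
/*
 * Route a response ring entry (CTIO, immediate notify, notify ack, ABTS) to
 * the vha identified by its vp_index, then process it via qlt_response_pkt().
 */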
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}
	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;
		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}
	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}
	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}
	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}
	default:
		qlt_response_pkt(vha, pkt);
		break;
	}
}
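
/*
 * Workqueue handler that finishes session teardown: releases the se_session
 * through the fabric callbacks, frees the qla_tgt_sess, and wakes anyone
 * waiting in qlt_stop_phase1() for sess_count to reach zero.
 */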
static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!tgt);
	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);
	/*
	 * We need to protect against a race where tgt is freed before or
	 * inside wake_up().
	 */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}

/* ha->hardware_lock supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	list_del(&sess->sess_list_entry);
	if (sess->deleted)
		list_del(&sess->del_list_entry);

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

/* ha->hardware_lock supposed to be held on entry */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	uint32_t unpacked_lun, lun = 0;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
#if 0	/* FIXME: Re-enable Global event handling.. */
		/* Global event */
		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
		qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
			    typeof(*sess), sess_list_entry);
			switch (mcmd) {
			case QLA_TGT_NEXUS_LOSS_SESS:
				mcmd = QLA_TGT_NEXUS_LOSS;
				break;
			case QLA_TGT_ABORT_ALL_SESS:
				mcmd = QLA_TGT_ABORT_ALL;
				break;
			case QLA_TGT_NEXUS_LOSS:
			case QLA_TGT_ABORT_ALL:
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe046,
				    "qla_target(%d): Not allowed "
				    "command %x in %s", vha->vp_idx,
				    mcmd, __func__);
				sess = NULL;
				break;
			}
		} else
			sess = NULL;
#endif
	} else {
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port "
	    "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
	    "mcmd %x, loop_id %d)\n", vha->host_no, sess,
	    sess->port_name[0], sess->port_name[1],
	    sess->port_name[2], sess->port_name[3],
	    sess->port_name[4], sess->port_name[5],
	    sess->port_name[6], sess->port_name[7],
	    mcmd, loop_id);

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
	    iocb, QLA24XX_MGMT_SEND_NACK);
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;
	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;

	if (sess->deleted)
		return;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);
	list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
	sess->deleted = 1;

	if (immediate)
		dev_loss_tmo = 0;

	sess->expires = jiffies + dev_loss_tmo * HZ;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
	    "qla_target(%d): session for port %02x:%02x:%02x:"
	    "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
	    "deletion in %u secs (expires: %lu) immed: %d\n",
	    sess->vha->vp_idx,
	    sess->port_name[0], sess->port_name[1],
	    sess->port_name[2], sess->port_name[3],
	    sess->port_name[4], sess->port_name[5],
	    sess->port_name[6], sess->port_name[7],
	    sess->loop_id, dev_loss_tmo, sess->expires, immediate);

	if (immediate)
		schedule_delayed_work(&tgt->sess_del_work, 0);
	else
		schedule_delayed_work(&tgt->sess_del_work,
		    sess->expires - jiffies);
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
		qlt_schedule_sess_for_deletion(sess, true);

	/* At this point tgt could be already dead */
}
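
/*
 * Look up the firmware loop ID for a given FC source address (s_id) by
 * fetching the firmware's list of logged-in ports. Returns 0 on success with
 * *loop_id filled in, or a negative value if the port is not found.
 */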
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -1;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -1;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}
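
/*
 * Called without hardware_lock from the delayed-deletion worker: ask the
 * firmware whether the port behind an expired session is still logged in.
 * Returns true (and refreshes the session's s_id/loop_id) if it is, so the
 * deletion can be cancelled; false otherwise. Retries if a global reset
 * happened while the firmware was being queried.
 */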
static bool qlt_check_fcport_exist(struct scsi_qla_host *vha,
	struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_port_24xx_data *pmap24;
	bool res, found = false;
	int rc, i;
	uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */
	uint16_t entries;
	void *pmap;
	int pmap_len;
	fc_port_t *fcport;
	int global_resets;

retry:
	global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);

	rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len);
	if (rc != QLA_SUCCESS) {
		res = false;
		goto out;
	}

	pmap24 = pmap;
	entries = pmap_len/sizeof(*pmap24);

	for (i = 0; i < entries; ++i) {
		if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) {
			loop_id = le16_to_cpu(pmap24[i].loop_id);
			found = true;
			break;
		}
	}

	kfree(pmap);

	if (!found) {
		res = false;
		goto out;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046,
	    "qlt_check_fcport_exist(): loop_id %d", loop_id);

	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
	if (fcport == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		res = false;
		goto out;
	}

	fcport->loop_id = loop_id;

	rc = qla2x00_get_port_database(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		res = false;
		goto out_free_fcport;
	}

	if (global_resets !=
	    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
		    "qla_target(%d): global reset during session discovery"
		    " (counter was %d, new %d), retrying",
		    vha->vp_idx, global_resets,
		    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
	    "Updating sess %p s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, "
	    "loop_id %d", sess, sess->s_id.b.domain, sess->s_id.b.al_pa,
	    sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id);

	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->conf_compl_supported = !!(fcport->flags &
	    FCF_CONF_COMP_SUPPORTED);

	res = true;

out_free_fcport:
	kfree(fcport);

out:
	return res;
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
	BUG_ON(!sess->deleted);

	list_del(&sess->del_list_entry);
	sess->deleted = 0;
}
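
/*
 * Delayed work that walks tgt->del_sess_list: sessions whose dev-loss timer
 * has expired are either rescued (if the firmware still knows the port) or
 * shut down and released; the work is rescheduled for the first session that
 * has not expired yet.
 */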
static void qlt_del_sess_work_fn(struct delayed_work *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt,
	    sess_del_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (!list_empty(&tgt->del_sess_list)) {
		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
		    del_list_entry);
		if (time_after_eq(jiffies, sess->expires)) {
			bool cancel;

			qlt_undelete_sess(sess);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			cancel = qlt_check_fcport_exist(vha, sess);

			if (cancel) {
				if (sess->deleted) {
					/*
					 * sess was again deleted while we were
					 * discovering it
					 */
					spin_lock_irqsave(&ha->hardware_lock,
					    flags);
					continue;
				}

				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049,
				    "qla_target(%d): cancel deletion of "
				    "session for port %02x:%02x:%02x:%02x:%02x:"
				    "%02x:%02x:%02x (loop ID %d), because "
				    "it isn't deleted by firmware",
				    vha->vp_idx, sess->port_name[0],
				    sess->port_name[1], sess->port_name[2],
				    sess->port_name[3], sess->port_name[4],
				    sess->port_name[5], sess->port_name[6],
				    sess->port_name[7], sess->loop_id);
			} else {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
				    "Timeout: sess %p about to be deleted\n",
				    sess);
				ha->tgt.tgt_ops->shutdown_sess(sess);
				ha->tgt.tgt_ops->put_sess(sess);
			}

			spin_lock_irqsave(&ha->hardware_lock, flags);
		} else {
			schedule_delayed_work(&tgt->sess_del_work,
			    sess->expires - jiffies);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/*
 * Adds an extra ref to allow dropping the hw lock after adding sess to the
 * list. Caller must put it.
 */
static struct qla_tgt_sess *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;
	unsigned char be_sid[3];

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
	    sess_list_entry) {
		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
			    "Double sess %p found (s_id %x:%x:%x, "
			    "loop_id %d), updating to d_id %x:%x:%x, "
			    "loop_id %d", sess, sess->s_id.b.domain,
			    sess->s_id.b.al_pa, sess->s_id.b.area,
			    sess->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
			    fcport->loop_id);

			if (sess->deleted)
				qlt_undelete_sess(sess);

			kref_get(&sess->se_sess->sess_kref);
			sess->s_id = fcport->d_id;
			sess->loop_id = fcport->loop_id;
			sess->conf_compl_supported = !!(fcport->flags &
			    FCF_CONF_COMP_SUPPORTED);
			if (sess->local && !local)
				sess->local = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			return sess;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
		    "qla_target(%u): session allocation failed, "
		    "all commands from port %02x:%02x:%02x:%02x:"
		    "%02x:%02x:%02x:%02x will be refused", vha->vp_idx,
		    fcport->port_name[0], fcport->port_name[1],
		    fcport->port_name[2], fcport->port_name[3],
		    fcport->port_name[4], fcport->port_name[5],
		    fcport->port_name[6], fcport->port_name[7]);

		return NULL;
	}
	sess->tgt = ha->tgt.qla_tgt;
	sess->vha = vha;
	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->local = local;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, ha->tgt.qla_tgt);

	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;

	/*
	 * Determine if this fc_port->port_name is allowed to access
	 * target mode using explicit NodeACLs+MappedLUNs, or using
	 * TPG demo mode. If this is successful a target mode FC nexus
	 * is created.
	 */
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
		kfree(sess);
		return NULL;
	}
	/*
	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
	 * access across ->hardware_lock reacquire.
	 */
	kref_get(&sess->se_sess->sess_kref);

	sess->conf_compl_supported = !!(fcport->flags &
	    FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
	ha->tgt.qla_tgt->sess_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:"
	    "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed"
	    " completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name[0],
	    fcport->port_name[1], fcport->port_name[2], fcport->port_name[3],
	    fcport->port_name[4], fcport->port_name[5], fcport->port_name[6],
	    fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain,
	    sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ?
	    "" : "not ");

	return sess;
}
/*
 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || (fcport->port_type != FCT_INITIATOR))
		return;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}
	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&ha->tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&ha->tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
	} else {
		kref_get(&sess->se_sess->sess_kref);

		if (sess->deleted) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
			    "qla_target(%u): %ssession for port %02x:"
			    "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
			    "reappeared\n", vha->vp_idx, sess->local ? "local "
			    : "", sess->port_name[0], sess->port_name[1],
			    sess->port_name[2], sess->port_name[3],
			    sess->port_name[4], sess->port_name[5],
			    sess->port_name[6], sess->port_name[7],
			    sess->loop_id);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
			    "Reappeared sess %p\n", sess);
		}
		sess->s_id = fcport->d_id;
		sess->loop_id = fcport->loop_id;
		sess->conf_compl_supported = !!(fcport->flags &
		    FCF_CONF_COMP_SUPPORTED);
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
		    "(loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name[0], fcport->port_name[1],
		    fcport->port_name[2], fcport->port_name[3],
		    fcport->port_name[4], fcport->port_name[5],
		    fcport->port_name[6], fcport->port_name[7],
		    sess->loop_id);
		sess->local = 0;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}
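
/*
 * Called from the initiator-side qla2xxx code when a remote port goes away.
 * The session is not torn down immediately: it is marked local and scheduled
 * for delayed deletion, so it can be rescued if the port reappears within the
 * dev-loss window.
 */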
void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || (fcport->port_type != FCT_INITIATOR))
		return;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}
	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	qlt_schedule_sess_for_deletion(sess, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
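
/*
 * Helper for the wait_event() calls in qlt_stop_phase1(): takes hardware_lock
 * and reports whether all sessions for this target have been released.
 */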
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against a race where tgt is freed before or
	 * inside wake_up().
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, empty(sess_list)=%d sess_count=%d\n",
	    tgt, list_empty(&tgt->sess_list), tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&ha->tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt, true);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&ha->tgt.tgt_mutex);

	flush_delayed_work_sync(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&ha->tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (tgt->irq_cmd_count != 0) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&ha->tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
void qlt_release(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;

	if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	ha->tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}
  934. /*
  935. * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
  936. */
  937. static void qlt_send_notify_ack(struct scsi_qla_host *vha,
  938. struct imm_ntfy_from_isp *ntfy,
  939. uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
  940. uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
  941. {
  942. struct qla_hw_data *ha = vha->hw;
  943. request_t *pkt;
  944. struct nack_to_isp *nack;
  945. ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
  946. /* Send marker if required */
  947. if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
  948. return;
  949. pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
  950. if (!pkt) {
  951. ql_dbg(ql_dbg_tgt, vha, 0xe049,
  952. "qla_target(%d): %s failed: unable to allocate "
  953. "request packet\n", vha->vp_idx, __func__);
  954. return;
  955. }
  956. if (ha->tgt.qla_tgt != NULL)
  957. ha->tgt.qla_tgt->notify_ack_expected++;
  958. pkt->entry_type = NOTIFY_ACK_TYPE;
  959. pkt->entry_count = 1;
  960. nack = (struct nack_to_isp *)pkt;
  961. nack->ox_id = ntfy->ox_id;
  962. nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
  963. if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
  964. nack->u.isp24.flags = ntfy->u.isp24.flags &
  965. __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
  966. }
  967. nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
  968. nack->u.isp24.status = ntfy->u.isp24.status;
  969. nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
  970. nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
  971. nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
  972. nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
  973. nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
  974. nack->u.isp24.srr_reject_code = srr_reject_code;
  975. nack->u.isp24.srr_reject_code_expl = srr_explan;
  976. nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
  977. ql_dbg(ql_dbg_tgt, vha, 0xe005,
  978. "qla_target(%d): Sending 24xx Notify Ack %d\n",
  979. vha->vp_idx, nack->u.isp24.status);
  980. qla2x00_start_iocbs(vha, vha->req);
  981. }

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	ha->tgt.qla_tgt->abts_resp_expected++;

	qla2x00_start_iocbs(vha, vha->req);
}
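
/*
 * Note: FCP_TMF_CMPL produces a BA_ACC payload, anything else a BA_RJT.
 * When responding to an initiator's ABTS the S_ID/D_ID pair is swapped
 * (ids_reversed == false); the retry-term path below passes
 * ids_reversed == true because the firmware's copy of our own ABTS
 * response already carries the IDs reversed.
 */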

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * On entry we have the firmware's response to an ABTS response
	 * that we ourselves generated, so its ID fields are already
	 * reversed.
	 */
	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags =
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	int rc;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
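
/*
 * Note on mcmd lifetime: on success the management command is handed to
 * the target core via tgt_ops->handle_tmr() and is released later through
 * qlt_free_mcmd() (called back as tgt_ops->free_mcmd() from
 * qlt_xmit_tm_rsp()); on failure it is freed here immediately.
 */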

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}
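
/*
 * Note: fcp_hdr_le holds the FC header in little-endian layout, so the
 * S_ID bytes above are reversed into big-endian order before the
 * find_sess_by_s_id() lookup.
 */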

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_SEND_STATUS);
	ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.scsi_status =
	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
	((uint32_t *)ctio->u.status1.sense_data)[0] = cpu_to_be32(resp_code);

	qla2x00_start_iocbs(ha, ha->req);
}
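
/*
 * Note: a TMR response carries no sense data; instead the 8-byte
 * FCP_RSP_INFO (with the response code packed big-endian into the first
 * dword) is placed in the sense_data area, flagged by
 * SS_RESPONSE_INFO_LEN_VALID with response_len = 8.
 */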

void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);

/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated
	 * se_cmd descriptor after TFO->queue_tm_rsp() ->
	 * tcm_qla2xxx_queue_tm_rsp() -> qlt_xmit_tm_rsp() returns here.
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);

/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	/*
	 * If the mapped segment count exceeds what a single command IOCB
	 * can carry (datasegs_per_cmd), reserve continuation entries for
	 * the remainder.
	 */
	if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
		prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
		    prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);

	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
	    prm->seg_cnt, prm->req_cnt);
	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}

static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!cmd->sg_mapped);
	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;
}
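
/*
 * Worked example of the req_cnt math above (the per-entry limits come
 * from tgt->datasegs_per_cmd/datasegs_per_cont; the values below are
 * illustrative): with datasegs_per_cmd = 1, datasegs_per_cont = 5 and
 * seg_cnt = 11, the extra entries are DIV_ROUND_UP(11 - 1, 5) = 2, so
 * req_cnt becomes 1 (command IOCB) + 2 (continuation IOCBs).
 */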

static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
	uint32_t req_cnt)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ha->iobase;
	uint32_t cnt;

	if (vha->req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);

		ql_dbg(ql_dbg_tgt, vha, 0xe00a,
		    "Request ring circled: cnt=%d, vha->req->ring_index=%d, "
		    "vha->req->cnt=%d, req_cnt=%d\n", cnt,
		    vha->req->ring_index, vha->req->cnt, req_cnt);
		if (vha->req->ring_index < cnt)
			vha->req->cnt = cnt - vha->req->ring_index;
		else
			vha->req->cnt = vha->req->length -
			    (vha->req->ring_index - cnt);
	}

	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
		ql_dbg(ql_dbg_tgt, vha, 0xe00b,
		    "qla_target(%d): There is no room in the "
		    "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
		    "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
		    vha->req->cnt, req_cnt);
		return -EAGAIN;
	}
	vha->req->cnt -= req_cnt;
	return 0;
}
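
/*
 * Note: vha->req->cnt caches the free-slot count. When it looks too low,
 * it is refreshed from the hardware consumer index (req_q_out): if the
 * producer (ring_index) is behind the consumer, the free space is
 * cnt - ring_index; otherwise it wraps, length - (ring_index - cnt).
 * The "+ 2" margin appears to keep the producer from fully catching up
 * with the consumer, which would make a full ring indistinguishable
 * from an empty one.
 */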

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
	/* Adjust ring index. */
	vha->req->ring_index++;
	if (vha->req->ring_index == vha->req->length) {
		vha->req->ring_index = 0;
		vha->req->ring_ptr = vha->req->ring;
	} else {
		vha->req->ring_ptr++;
	}
	return (cont_entry_t *)vha->req->ring_ptr;
}

/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
		if (h > MAX_OUTSTANDING_COMMANDS)
			h = 1;	/* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
			ql_dbg(ql_dbg_tgt, vha, 0xe04e,
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}
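
/*
 * Note on the handle scheme: valid command handles are
 * 1..MAX_OUTSTANDING_COMMANDS and index ha->tgt.cmds[h - 1]; 0 is
 * QLA_TGT_NULL_HANDLE and QLA_TGT_SKIP_HANDLE marks IOCBs that need no
 * completion lookup. The search above walks the full circle at most
 * once before giving up. CTIO_COMPLETION_HANDLE_MARK is OR'd in later,
 * presumably so target-mode completions can be told apart from
 * initiator-mode handles, and is masked off again in qlt_ctio_to_cmd().
 */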

/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, therefore, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
	pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	ql_dbg(ql_dbg_tgt, vha, 0xe00c,
	    "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
	    le16_to_cpu(pkt->u.status0.ox_id));
	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made
 * sure that there are enough request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that none of cont_pkt64's 64-bit specific
		 * fields are used when doing 32-bit addressing; cast to
		 * (cont_entry_t *) for that case.
		 */
		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address
					(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			ql_dbg(ql_dbg_tgt, vha, 0xe00d,
			    "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
			    (long long unsigned int)
			    pci_dma_hi32(sg_dma_address(prm->sg)),
			    (long long unsigned int)
			    pci_dma_lo32(sg_dma_address(prm->sg)),
			    (int)sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made
 * sure that there are enough request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	ql_dbg(ql_dbg_tgt, vha, 0xe00e,
	    "iocb->scsi_status=%x, iocb->flags=%x\n",
	    le16_to_cpu(pkt24->u.status0.scsi_status),
	    le16_to_cpu(pkt24->u.status0.flags));

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */
	ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		ql_dbg(ql_dbg_tgt, vha, 0xe010,
		    "S/G Segment phys_addr=%llx:%llx, len=%d\n",
		    (long long unsigned int)pci_dma_hi32(sg_dma_address(
		    prm->sg)),
		    (long long unsigned int)pci_dma_lo32(sg_dma_address(
		    prm->sg)),
		    (int)sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm, vha);
}

static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}
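
/*
 * Note: each data segment above is written as { addr_lo, [addr_hi,] len }
 * little-endian dwords; the addr_hi dword is present only when 64-bit
 * addressing is enabled, which is why the 32-bit path uses the smaller
 * cont_entry_t layout for continuation IOCBs.
 */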

/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (unlikely(cmd->aborted)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
		    "qla_target(%d): terminating exchange "
		    "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
		    se_cmd, cmd->tag);

		cmd->state = QLA_TGT_STATE_ABORTED;

		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);

		/* !! At this point cmd may already be freed !! */
		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
	    vha->vp_idx, cmd->tag);

	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
	    prm->rq_result, xmit_type);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe014,
		    "Residual underflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe015,
		    "Residual overflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(ha) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe016,
	    "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
	    prm->req_cnt, *full_req_cnt, prm->add_status_pkt);

	return 0;
}
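
/*
 * Note: add_status_pkt == 1 means data and status cannot share one CTIO
 * (sense data is valid or a non-GOOD status must be reported), so an
 * extra request-ring entry is reserved here and qlt_xmit_response()
 * emits the status in a separate STATUS_MODE_1 CTIO after the data one.
 */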

static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
	struct qla_tgt_cmd *cmd, int sending_sense)
{
	if (ha->tgt.enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return ha->tgt.enable_explicit_conf &&
		    cmd->conf_compl_supported;
}
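
/*
 * Note: explicit confirmation (CTIO7_FLAGS_EXPLICIT_CONFORM /
 * CTIO7_FLAGS_CONFORM_REQ below) asks the initiator to confirm delivery
 * of the response frame. It is only requested when the initiator
 * advertised support for it (conf_compl_supported), it was enabled
 * (or sense data is being sent), and Class 2 service, which is
 * acknowledged at the FC level anyway, is not in use.
 */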

#ifdef CONFIG_QLA_TGT_DEBUG_SRR
/*
 * Original taken from the XFS code
 */
static unsigned long qlt_srr_random(void)
{
	static int Inited;
	static unsigned long RandomValue;
	static DEFINE_SPINLOCK(lock);
	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
	register long rv;
	register long lo;
	register long hi;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	if (!Inited) {
		RandomValue = jiffies;
		Inited = 1;
	}
	rv = RandomValue;
	hi = rv / 127773;
	lo = rv % 127773;
	rv = 16807 * lo - 2836 * hi;
	if (rv <= 0)
		rv += 2147483647;
	RandomValue = rv;
	spin_unlock_irqrestore(&lock, flags);
	return rv;
}

static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{
#if 0 /* This doesn't simulate a real lost status packet, so it won't lead to an SRR */
	if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
	    == 50) {
		*xmit_type &= ~QLA_TGT_XMIT_STATUS;
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
		    "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
	}
#endif
	/*
	 * It's currently not possible to simulate SRRs for FCP_WRITE without
	 * a physical link layer failure, so don't even try here.
	 */
	if (cmd->dma_data_direction != DMA_FROM_DEVICE)
		return;

	if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
	    ((qlt_srr_random() % 100) == 20)) {
		int i, leave = 0;
		unsigned int tot_len = 0;

		while (leave == 0)
			leave = qlt_srr_random() % cmd->sg_cnt;

		for (i = 0; i < leave; i++)
			tot_len += cmd->sg[i].length;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
		    "Cutting cmd %p (tag %d) buffer"
		    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
		    " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
		    cmd->bufflen, cmd->sg_cnt);

		cmd->bufflen = tot_len;
		cmd->sg_cnt = leave;
	}

	if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
		unsigned int offset = qlt_srr_random() % cmd->bufflen;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
		    "Cutting cmd %p (tag %d) buffer head "
		    "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
		    cmd->bufflen);
		if (offset == 0)
			*xmit_type &= ~QLA_TGT_XMIT_DATA;
		else if (qlt_set_data_offset(cmd, offset)) {
			ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
			    "qlt_set_data_offset() failed (tag %d)", cmd->tag);
		}
	}
}
#else
static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{}
#endif
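
/*
 * Note: qlt_srr_random() above is the classic Park-Miller "minimal
 * standard" generator, x' = 16807 * x mod (2^31 - 1), computed with
 * Schrage's factorization (127773 = floor((2^31 - 1) / 16807) and
 * 2836 = (2^31 - 1) mod 16807) to avoid 32-bit overflow.
 */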

static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |=
	    __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
		ctio->u.status0.flags |= __constant_cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
			if (prm->cmd->se_cmd.scsi_status != 0) {
				ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explicit_conf;
			}
			ctio->u.status1.flags |= __constant_cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explicit_conf:
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
			    cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
#if 0
		if (unlikely((prm->sense_buffer_len % 4) != 0)) {
			static int q;
			if (q < 10) {
				ql_dbg(ql_dbg_tgt, vha, 0xe04f,
				    "qla_target(%d): %d bytes of sense "
				    "lost", prm->tgt->ha->vp_idx,
				    prm->sense_buffer_len % 4);
				q++;
			}
		}
#endif
	} else {
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}

/*
 * Callback to set up the response for an xmit_type of QLA_TGT_XMIT_DATA
 * and/or QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	memset(&prm, 0, sizeof(prm));
	qlt_check_srr_debug(cmd, &xmit_type);

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
	    "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
			return 0;

		return res;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Does the f/w have enough free IOCB entries for this request? */
	res = qlt_check_reserve_free_req(vha, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0))
		goto out_unmap_unlock;

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		qlt_load_data_segments(&prm, vha);

		if (prm.add_status_pkt == 0) {
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= __constant_cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(ha, cmd, 0)) {
					pkt->u.status0.flags |=
					    __constant_cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there are enough
			 * request entries to avoid dropping the HW lock in
			 * qlt_get_req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
				(struct ctio7_to_24xx *)qlt_get_req_pkt(vha);

			ql_dbg(ql_dbg_tgt, vha, 0xe019,
			    "Building additional status packet\n");

			memcpy(ctio, pkt, sizeof(*ctio));
			ctio->entry_count = 1;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~__constant_cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= __constant_cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
			pr_debug("Status CTIO7: %p\n", ctio);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);

	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */

	ql_dbg(ql_dbg_tgt, vha, 0xe01a,
	    "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
	    pkt, scsi_status);

	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 0;

out_unmap_unlock:
	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);
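
/*
 * Note on the two-CTIO pattern above: when data and status go out
 * separately, the data CTIO is tagged CTIO_INTERMEDIATE_HANDLE_MARK and
 * CTIO7_FLAGS_DONT_RET_CTIO, and the copied status CTIO carries the
 * real completion; qlt_do_ctio_completion() below ignores intermediate
 * completions unless they report an error.
 */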

int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags;
	int res = 0;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EIO;

	ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
	    (int)vha->vp_idx);

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Does the f/w have enough free IOCB entries for this request? */
	res = qlt_check_reserve_free_req(vha, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;

	res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0))
		goto out_unlock_free_unmap;
	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);
	qlt_load_data_segments(&prm, vha);

	cmd->state = QLA_TGT_STATE_NEED_DATA;

	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;

out_unlock_free_unmap:
	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);

/* If hardware_lock held on entry, might drop it, then reacquire */
/* Sends a TERMINATE EXCHANGE CTIO7 to the ISP for the given exchange */
static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);

	/* Most likely, it isn't needed */
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}

static void qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
{
	unsigned long flags;
	int rc;

	if (qlt_issue_marker(vha, ha_locked) < 0)
		return;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(vha, cmd, atio);
		goto done;
	}
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	rc = __qlt_send_term_exchange(vha, cmd, atio);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
done:
	if (rc == 1) {
		if (!ha_locked && !in_interrupt())
			msleep(250); /* just in case */

		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}
}

void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
	BUG_ON(cmd->sg_mapped);

	if (unlikely(cmd->free_sg))
		kfree(cmd->sg);
	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);

/* ha->hardware_lock supposed to be held on entry */
static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, void *ctio)
{
	struct qla_tgt_srr_ctio *sc;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	struct qla_tgt_srr_imm *imm;

	tgt->ctio_srr_id++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
	    "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);

	if (!ctio) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
		    "qla_target(%d): SRR CTIO, but ctio is NULL\n",
		    vha->vp_idx);
		return -EINVAL;
	}

	sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
	if (sc != NULL) {
		sc->cmd = cmd;
		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		sc->srr_id = tgt->ctio_srr_id;
		list_add_tail(&sc->srr_list_entry,
		    &tgt->srr_ctio_list);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		    "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
			int found = 0;
			list_for_each_entry(imm, &tgt->srr_imm_list,
			    srr_list_entry) {
				if (imm->srr_id == sc->srr_id) {
					found = 1;
					break;
				}
			}
			if (found) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
				    "Scheduling srr work\n");
				schedule_work(&tgt->srr_work);
			} else {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
				    "qla_target(%d): imm_srr_id "
				    "== ctio_srr_id (%d), but there is no "
				    "corresponding SRR IMM, deleting CTIO "
				    "SRR %p\n", vha->vp_idx,
				    tgt->ctio_srr_id, sc);
				list_del(&sc->srr_list_entry);
				spin_unlock(&tgt->srr_lock);

				kfree(sc);
				return -EINVAL;
			}
		}
		spin_unlock(&tgt->srr_lock);
	} else {
		struct qla_tgt_srr_imm *ti;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
		    "qla_target(%d): Unable to allocate SRR CTIO entry\n",
		    vha->vp_idx);
		spin_lock(&tgt->srr_lock);
		list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
		    srr_list_entry) {
			if (imm->srr_id == tgt->ctio_srr_id) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
				    "IMM SRR %p deleted (id %d)\n",
				    imm, imm->srr_id);
				list_del(&imm->srr_list_entry);
				qlt_reject_free_srr_imm(vha, imm, 1);
			}
		}
		spin_unlock(&tgt->srr_lock);

		return -ENOMEM;
	}

	return 0;
}
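
/*
 * Note: an SRR arrives as a pair of events, an immediate notify (counted
 * by imm_srr_id) and a CTIO completion with SRR status (counted by
 * ctio_srr_id). The sequence numbers pair entries across the two lists;
 * tgt->srr_work is only scheduled once both halves with a matching
 * srr_id have been queued.
 */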

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
	struct qla_tgt_cmd *cmd, uint32_t status)
{
	int term = 0;

	if (ctio != NULL) {
		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
		term = !(c->flags &
		    __constant_cpu_to_le16(OF_TERM_EXCH));
	} else
		term = 1;

	if (term)
		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);

	return term;
}

/* ha->hardware_lock supposed to be held on entry */
static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
	uint32_t handle)
{
	struct qla_hw_data *ha = vha->hw;

	handle--;
	if (ha->tgt.cmds[handle] != NULL) {
		struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
		ha->tgt.cmds[handle] = NULL;
		return cmd;
	} else
		return NULL;
}

/* ha->hardware_lock supposed to be held on entry */
static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	uint32_t handle, void *ctio)
{
	struct qla_tgt_cmd *cmd = NULL;

	/* Clear out internal marks */
	handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
	    CTIO_INTERMEDIATE_HANDLE_MARK);

	if (handle != QLA_TGT_NULL_HANDLE) {
		if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
			    "SKIP_HANDLE CTIO\n");
			return NULL;
		}
		/* handle-1 is actually used */
		if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}
		cmd = qlt_get_cmd(vha, handle);
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to "
			    "find the command with handle %x\n", vha->vp_idx,
			    handle);
			return NULL;
		}
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
	uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct target_core_fabric_ops *tfo;
	struct qla_tgt_cmd *cmd;

	ql_dbg(ql_dbg_tgt, vha, 0xe01e,
	    "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
	    vha->vp_idx, ctio, status, handle);

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = qlt_ctio_to_cmd(vha, handle, ctio);
	if (cmd == NULL)
		return;

	se_cmd = &cmd->se_cmd;
	tfo = se_cmd->se_tfo;

	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
		case CTIO_TIMEOUT:
		case CTIO_INVALID_RX_ID:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with PORT LOGGED "
			    "OUT (29) or PORT UNAVAILABLE (28) status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_SRR_RECEIVED:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
			    "qla_target(%d): CTIO with SRR_RECEIVED"
			    " status %x received (state %x, se_cmd %p)\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
				break;
			else
				return;

		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status "
			    "0x%x received (state %x, se_cmd %p)\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}

		if (cmd->state != QLA_TGT_STATE_NEED_DATA)
			if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
				return;
	}

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		int rx_status = 0;

		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (unlikely(status != CTIO_SUCCESS))
			rx_status = -EIO;
		else
			cmd->write_data_transferred = 1;

		ql_dbg(ql_dbg_tgt, vha, 0xe020,
		    "Data received, context %x, rx_status %d\n",
		    0x0, rx_status);

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		    "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
	} else {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}
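
/*
 * Note on the command state machine visible above: PROCESSED (status
 * already sent) and ABORTED commands are freed on CTIO completion;
 * NEED_DATA (write buffer posted) transitions to DATA_IN and is handed
 * back to the target core through tgt_ops->handle_data() instead of
 * being freed here.
 */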

/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;

	if (likely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe021,
		    "CTIO, but target mode not enabled"
		    " (ha %d %p handle %#x)", vha->vp_idx, ha, handle);
		return;
	}

	tgt->irq_cmd_count++;
	qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL);
	tgt->irq_cmd_count--;
}

static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
		fcp_task_attr = MSG_SIMPLE_TAG;
		break;
	case ATIO_HEAD_OF_QUEUE:
		fcp_task_attr = MSG_HEAD_TAG;
		break;
	case ATIO_ORDERED_QUEUE:
		fcp_task_attr = MSG_ORDERED_TAG;
		break;
	case ATIO_ACA_QUEUE:
		fcp_task_attr = MSG_ACA_TAG;
		break;
	case ATIO_UNTAGGED:
		fcp_task_attr = MSG_SIMPLE_TAG;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		fcp_task_attr = MSG_ORDERED_TAG;
		break;
	}

	return fcp_task_attr;
}

static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
	uint8_t *);

/*
 * Process context for I/O path into tcm_qla2xxx code
 */
static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	struct qla_tgt_sess *sess = NULL;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;

	if (tgt->tgt_stop)
		goto out_term;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	if (sess) {
		if (unlikely(sess->tearing_down)) {
			sess = NULL;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			goto out_term;
		} else {
			/*
			 * Do the extra kref_get() before dropping
			 * qla_hw_data->hardware_lock.
			 */
			kref_get(&sess->se_sess->sess_kref);
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (unlikely(!sess)) {
		uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
		    "qla_target(%d): Unable to find wwn login"
		    " (s_id %x:%x:%x), trying to create it manually\n",
		    vha->vp_idx, s_id[0], s_id[1], s_id[2]);

		if (atio->u.raw.entry_count > 1) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
			    "Dropping multi-entry cmd %p\n", cmd);
			goto out_term;
		}

		mutex_lock(&ha->tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has an extra creation ref. */
		mutex_unlock(&ha->tgt.tgt_mutex);

		if (!sess)
			goto out_term;
	}

	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->tag = atio->u.isp24.exchange_addr;
	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);

	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = be32_to_cpu(get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]));

	ql_dbg(ql_dbg_tgt, vha, 0xe022,
	    "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
	    cmd, cmd->unpacked_lun, cmd->tag);

	ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
	    fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop the extra session reference taken above (kref_get() or the
	 * qlt_make_local_sess() creation ref).
	 */
	ha->tgt.tgt_ops->put_sess(sess);
	return;

out_term:
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not been sent to the target core yet, so pass NULL as the
	 * second argument to qlt_send_term_exchange() and free the memory
	 * here.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
}
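
/*
 * Note: the FCP_CMND data length sits right after the (possibly empty)
 * additional CDB bytes, which is why it is read at offset add_cdb_len
 * within add_cdb[] above; the same trick appears in
 * __qlt_send_term_exchange() when it digs out the residual.
 */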

/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	struct qla_tgt_cmd *cmd;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
		    "New command while device %p is shutting down\n", tgt);
		return -EFAULT;
	}

	cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
	if (!cmd) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&cmd->cmd_list);

	memcpy(&cmd->atio, atio, sizeof(*atio));
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = ha->tgt.qla_tgt;
	cmd->vha = vha;

	INIT_WORK(&cmd->work, qlt_do_work);
	queue_work(qla_tgt_wq, &cmd->work);
	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	int res;
	uint8_t tmr_func;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;

	switch (fn) {
	case QLA_TGT_CLEAR_ACA:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
		    "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_ACA;
		break;

	case QLA_TGT_TARGET_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
		    "qla_target(%d): TARGET_RESET received\n",
		    sess->vha->vp_idx);
		tmr_func = TMR_TARGET_WARM_RESET;
		break;

	case QLA_TGT_LUN_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
		    "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
		tmr_func = TMR_LUN_RESET;
		break;

	case QLA_TGT_CLEAR_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
		    "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_TASK_SET;
		break;

	case QLA_TGT_ABORT_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
		    "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_ABORT_TASK_SET;
		break;
#if 0
	case QLA_TGT_ABORT_ALL:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
		    "qla_target(%d): Doing ABORT_ALL_TASKS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_ABORT_ALL_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
		    "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
		    "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
		    "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
		tmr_func = 0;
		break;
#endif
	default:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
		    "qla_target(%d): Unknown task mgmt fn 0x%x\n",
		    sess->vha->vp_idx, fn);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -ENOSYS;
	}

	res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
	if (res != 0) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
		    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
		    sess->vha->vp_idx, res);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt;
	struct qla_tgt_sess *sess;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;

	tgt = ha->tgt.qla_tgt;

	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
		    "qla_target(%d): task mgmt fn 0x%x for "
		    "non-existent session\n", vha->vp_idx, fn);
		return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
		    sizeof(struct atio_from_isp));
	}

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}

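/*
 * Illustrative sketch (not part of the original driver): how a SAM LUN byte
 * string collapses into the host-order integer used above. The helper name
 * and the sample encoding are assumptions for demonstration; a single-level
 * peripheral-addressing LUN of 0x0003 unpacks to plain 3.
 */
static inline uint32_t qlt_example_unpack_lun(void)
{
	struct scsi_lun sl = { .scsi_lun = { 0x00, 0x03 } };

	/* scsilun_to_int() folds the SAM multi-level LUN bytes into one int */
	return scsilun_to_int(&sl);	/* == 3 for this encoding */
}
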
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	uint32_t lun, unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	int loop_id;

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for non-existent "
		    "session\n", vha->vp_idx);
		return qlt_sched_sess_work(ha->tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb, sess);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	int res = 0;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
	    "qla_target(%d): Port ID: 0x%02x:%02x:%02x"
	    " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0],
	    iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2],
	    iocb->u.isp24.status_subcode);

	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:
	case ELS_FLOGI:
	case ELS_PRLI:
	case ELS_LOGO:
	case ELS_PRLO:
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = ha->tgt.qla_tgt;
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		res = 1; /* send notify ack */
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	return res;
}

static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
{
	struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
	size_t first_offset = 0, rem_offset = offset, tmp = 0;
	int i, sg_srr_cnt, bufflen = 0;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
	    "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
	    "cmd->sg_cnt: %u, direction: %d\n",
	    cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);

	/*
	 * FIXME: Reject non-zero SRR relative offset until we can test
	 * this code properly.
	 */
	pr_debug("Rejecting non-zero SRR rel_offs: %u\n", offset);
	return -1;

	if (!cmd->sg || !cmd->sg_cnt) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
		    "Missing cmd->sg or zero cmd->sg_cnt in"
		    " qla_tgt_set_data_offset\n");
		return -EINVAL;
	}
	/*
	 * Walk the current cmd->sg list until we locate the new sg_srr_start
	 */
	for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
		    "sg[%d]: %p page: %p, length: %d, offset: %d\n",
		    i, sg, sg_page(sg), sg->length, sg->offset);

		if ((sg->length + tmp) > offset) {
			first_offset = rem_offset;
			sg_srr_start = sg;
			ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
			    "Found matching sg[%d], using %p as sg_srr_start, "
			    "and using first_offset: %zu\n", i, sg,
			    first_offset);
			break;
		}
		tmp += sg->length;
		rem_offset -= sg->length;
	}

	if (!sg_srr_start) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
		    "Unable to locate sg_srr_start for offset: %u\n", offset);
		return -EINVAL;
	}
	sg_srr_cnt = (cmd->sg_cnt - i);

	sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
	if (!sg_srr) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
		    "Unable to allocate sgp\n");
		return -ENOMEM;
	}
	sg_init_table(sg_srr, sg_srr_cnt);
	sgp = &sg_srr[0];
	/*
	 * Walk the remaining list for sg_srr_start, mapping to the newly
	 * allocated sg_srr taking first_offset into account.
	 */
	for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
		if (first_offset) {
			sg_set_page(sgp, sg_page(sg),
			    (sg->length - first_offset), first_offset);
			first_offset = 0;
		} else {
			sg_set_page(sgp, sg_page(sg), sg->length, 0);
		}
		bufflen += sgp->length;

		sgp = sg_next(sgp);
		if (!sgp)
			break;
	}

	cmd->sg = sg_srr;
	cmd->sg_cnt = sg_srr_cnt;
	cmd->bufflen = bufflen;
	cmd->offset += offset;
	cmd->free_sg = 1;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
	    cmd->sg_cnt);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
	    cmd->bufflen);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
	    cmd->offset);

	if (cmd->sg_cnt < 0)
		BUG();

	if (cmd->bufflen < 0)
		BUG();

	return 0;
}

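/*
 * Illustrative sketch (not part of the original driver): the arithmetic
 * qlt_set_data_offset() uses to locate the SRR restart point in the SG
 * list. With 4 KB entries and offset 6144, the walk stops at entry 1 with
 * an intra-entry offset of 2048. The helper name is hypothetical.
 */
static inline int qlt_example_locate_offset(struct scatterlist *sgl,
	int nents, uint32_t offset, uint32_t *first_offset)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (offset < sg->length) {
			*first_offset = offset;	/* offset into this entry */
			return i;		/* index of sg_srr_start */
		}
		offset -= sg->length;		/* consume this entry */
	}
	return -1;				/* offset beyond the list */
}
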
static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
	uint32_t srr_rel_offs, int *xmit_type)
{
	int res = 0, rel_offs;

	rel_offs = srr_rel_offs - cmd->offset;
	ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
	    srr_rel_offs, rel_offs);

	*xmit_type = QLA_TGT_XMIT_ALL;

	if (rel_offs < 0) {
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
		    "qla_target(%d): SRR rel_offs (%d) < 0",
		    cmd->vha->vp_idx, rel_offs);
		res = -1;
	} else if (rel_offs == cmd->bufflen)
		*xmit_type = QLA_TGT_XMIT_STATUS;
	else if (rel_offs > 0)
		res = qlt_set_data_offset(cmd, rel_offs);

	return res;
}

/* No locks, thread context */
static void qlt_handle_srr(struct scsi_qla_host *vha,
	struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
{
	struct imm_ntfy_from_isp *ntfy =
	    (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_cmd *cmd = sctio->cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;
	int xmit_type = 0, resp = 0;
	uint32_t offset;
	uint16_t srr_ui;

	offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
	srr_ui = ntfy->u.isp24.srr_ui;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
	    cmd, srr_ui);

	switch (srr_ui) {
	case SRR_IU_STATUS:
		spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_send_notify_ack(vha, ntfy,
		    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		xmit_type = QLA_TGT_XMIT_STATUS;
		resp = 1;
		break;
	case SRR_IU_DATA_IN:
		if (!cmd->sg || !cmd->sg_cnt) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
			    "Unable to process SRR_IU_DATA_IN due to"
			    " missing cmd->sg, state: %d\n", cmd->state);
			dump_stack();
			goto out_reject;
		}
		if (se_cmd->scsi_status != 0) {
			ql_dbg(ql_dbg_tgt, vha, 0xe02a,
			    "Rejecting SRR_IU_DATA_IN with non GOOD "
			    "scsi_status\n");
			goto out_reject;
		}
		cmd->bufflen = se_cmd->data_length;

		if (qlt_has_data(cmd)) {
			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
				goto out_reject;
			spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_notify_ack(vha, ntfy,
			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			resp = 1;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
			    "qla_target(%d): SRR for data-in, but the "
			    "command has no data (tag %d, SCSI status %d), "
			    "rejecting", vha->vp_idx, cmd->tag,
			    cmd->se_cmd.scsi_status);
			goto out_reject;
		}
		break;
	case SRR_IU_DATA_OUT:
		if (!cmd->sg || !cmd->sg_cnt) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
			    "Unable to process SRR_IU_DATA_OUT due to"
			    " missing cmd->sg\n");
			dump_stack();
			goto out_reject;
		}
		if (se_cmd->scsi_status != 0) {
			ql_dbg(ql_dbg_tgt, vha, 0xe02b,
			    "Rejecting SRR_IU_DATA_OUT"
			    " with non GOOD scsi_status\n");
			goto out_reject;
		}
		cmd->bufflen = se_cmd->data_length;

		if (qlt_has_data(cmd)) {
			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
				goto out_reject;
			spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_notify_ack(vha, ntfy,
			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			if (xmit_type & QLA_TGT_XMIT_DATA)
				qlt_rdy_to_xfer(cmd);
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
			    "qla_target(%d): SRR for data-out, but the "
			    "command has no data (tag %d, SCSI status %d), "
			    "rejecting", vha->vp_idx, cmd->tag,
			    cmd->se_cmd.scsi_status);
			goto out_reject;
		}
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
		    "qla_target(%d): Unknown srr_ui value %x",
		    vha->vp_idx, srr_ui);
		goto out_reject;
	}

	/* Transmit response in case of status and data-in cases */
	if (resp)
		qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);

	return;

out_reject:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
	if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->state = QLA_TGT_STATE_DATA_IN;
		dump_stack();
	} else
		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
	struct qla_tgt_srr_imm *imm, int ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	if (!ha_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);

	if (!ha_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(imm);
}

static void qlt_handle_srr_work(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_tgt_srr_ctio *sctio;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
	    tgt);

restart:
	spin_lock_irqsave(&tgt->srr_lock, flags);
	list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
		struct qla_tgt_srr_imm *imm, *i, *ti;
		struct qla_tgt_cmd *cmd;
		struct se_cmd *se_cmd;

		imm = NULL;
		list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
		    srr_list_entry) {
			if (i->srr_id == sctio->srr_id) {
				list_del(&i->srr_list_entry);
				if (imm) {
					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
					    "qla_target(%d): There must be "
					    "only one IMM SRR per CTIO SRR "
					    "(IMM SRR %p, id %d, CTIO %p)\n",
					    vha->vp_idx, i, i->srr_id, sctio);
					qlt_reject_free_srr_imm(tgt->vha, i, 0);
				} else
					imm = i;
			}
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
		    "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
		    sctio->srr_id);

		if (imm == NULL) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
			    "No matching IMM found for SRR CTIO (id %d)\n",
			    sctio->srr_id);
			continue;
		} else
			list_del(&sctio->srr_list_entry);

		spin_unlock_irqrestore(&tgt->srr_lock, flags);

		cmd = sctio->cmd;
		/*
		 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
		 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
		 * logic..
		 */
		cmd->offset = 0;
		if (cmd->free_sg) {
			kfree(cmd->sg);
			cmd->sg = NULL;
			cmd->free_sg = 0;
		}
		se_cmd = &cmd->se_cmd;

		cmd->sg_cnt = se_cmd->t_data_nents;
		cmd->sg = se_cmd->t_data_sg;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
		    "SRR cmd %p (se_cmd %p, tag %d, op %x), "
		    "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
		    se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);

		qlt_handle_srr(vha, sctio, imm);

		kfree(imm);
		kfree(sctio);
		goto restart;
	}
	spin_unlock_irqrestore(&tgt->srr_lock, flags);
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt_srr_imm *imm;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	struct qla_tgt_srr_ctio *sctio;

	tgt->imm_srr_id++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
	    vha->vp_idx);

	imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
	if (imm != NULL) {
		memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));

		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		imm->srr_id = tgt->imm_srr_id;
		list_add_tail(&imm->srr_list_entry,
		    &tgt->srr_imm_list);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
		    "IMM NTFY SRR %p added (id %d, ui %x)\n",
		    imm, imm->srr_id, iocb->u.isp24.srr_ui);
		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
			int found = 0;
			list_for_each_entry(sctio, &tgt->srr_ctio_list,
			    srr_list_entry) {
				if (sctio->srr_id == imm->srr_id) {
					found = 1;
					break;
				}
			}
			if (found) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
				    "Scheduling srr work\n");
				schedule_work(&tgt->srr_work);
			} else {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
				    "qla_target(%d): imm_srr_id "
				    "== ctio_srr_id (%d), but there is no "
				    "corresponding SRR CTIO, deleting IMM "
				    "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
				    imm);
				list_del(&imm->srr_list_entry);

				kfree(imm);

				spin_unlock(&tgt->srr_lock);
				goto out_reject;
			}
		}
		spin_unlock(&tgt->srr_lock);
	} else {
		struct qla_tgt_srr_ctio *ts;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
		    "qla_target(%d): Unable to allocate SRR IMM "
		    "entry, SRR request will be rejected\n", vha->vp_idx);

		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
		    srr_list_entry) {
			if (sctio->srr_id == tgt->imm_srr_id) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
				    "CTIO SRR %p deleted (id %d)\n",
				    sctio, sctio->srr_id);
				list_del(&sctio->srr_list_entry);
				qlt_send_term_exchange(vha, sctio->cmd,
				    &sctio->cmd->atio, 1);
				kfree(sctio);
			}
		}
		spin_unlock(&tgt->srr_lock);
		goto out_reject;
	}

	return;

out_reject:
	qlt_send_notify_ack(vha, iocb, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	int send_notify_ack = 1;
	uint16_t status;

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = ha->tgt.qla_tgt;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires waiting after a LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		if (qlt_handle_task_mgmt(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_SRR:
		qlt_prepare_srr_imm(vha, iocb);
		send_notify_ack = 0;
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire. This function sends busy to ISP 2xxx or 24xx.
 */
static void qlt_send_busy(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct qla_tgt_sess *sess = NULL;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	if (!sess) {
		qlt_send_term_exchange(vha, NULL, atio, 1);
		return;
	}
	/* Sending marker isn't necessary, since we're called from ISR */

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = sess->loop_id;
	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
		CTIO7_FLAGS_DONT_RET_CTIO);
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if the explicit confirmation is used.
	 */
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.scsi_status = cpu_to_le16(status);
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
		atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	qla2x00_start_iocbs(vha, vha->req);
}

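/*
 * Illustrative sketch (not part of the original driver): the S_ID byte
 * reversal performed when filling ctio24->initiator_id[] above. The FCP
 * header carries the S_ID in wire (big-endian) order, while CTIO7 wants
 * the bytes reversed. The helper name is hypothetical.
 */
static inline void qlt_example_reverse_s_id(const uint8_t *s_id,
	uint8_t *initiator_id)
{
	initiator_id[0] = s_id[2];
	initiator_id[1] = s_id[1];
	initiator_id[2] = s_id[0];
}
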
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	int rc;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	ql_dbg(ql_dbg_tgt, vha, 0xe02c,
	    "qla_target(%d): ATIO pkt %p: type %02x count %02x",
	    vha->vp_idx, atio, atio->u.raw.entry_type,
	    atio->u.raw.entry_count);
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */
	tgt->irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		ql_dbg(ql_dbg_tgt, vha, 0xe02d,
		    "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
		    "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
		    vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
		    atio->u.isp24.fcp_cmnd.rddata,
		    atio->u.isp24.fcp_cmnd.wrdata,
		    atio->u.isp24.fcp_cmnd.add_cdb_len,
		    be32_to_cpu(get_unaligned((uint32_t *)
			&atio->u.isp24.fcp_cmnd.add_cdb[
			    atio->u.isp24.fcp_cmnd.add_cdb_len])),
		    atio->u.isp24.fcp_hdr.s_id[0],
		    atio->u.isp24.fcp_hdr.s_id[1],
		    atio->u.isp24.fcp_hdr.s_id[2]);

		if (unlikely(atio->u.isp24.exchange_addr ==
		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe058,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
			break;
		}

		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
			rc = qlt_handle_cmd_for_atio(vha, atio);
		else
			rc = qlt_handle_task_mgmt(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe059,
					    "qla_target: Unable to send "
					    "command to target for req, "
					    "ignoring.\n");
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status.\n", vha->vp_idx);
					qlt_send_busy(vha, atio, SAM_STAT_BUSY);
				}
			}
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}

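/*
 * Illustrative sketch (not part of the original driver): why both the debug
 * print above and qlt_send_busy() index add_cdb[] by add_cdb_len. In the
 * FCP_CMND IU the FCP_DL (data length) field sits immediately after the
 * variable-length additional CDB bytes, so that index lands directly on it.
 * The helper name is hypothetical.
 */
static inline uint32_t qlt_example_fcp_dl(struct atio_from_isp *atio)
{
	return be32_to_cpu(get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
		atio->u.isp24.fcp_cmnd.add_cdb_len]));
}
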
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no "
		    "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
		return;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe02f,
	    "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
	    "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
	    pkt->entry_count, pkt->entry_status, pkt->handle);

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */
	tgt->irq_cmd_count++;

	switch (pkt->entry_type) {
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
		    vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;
		ql_dbg(ql_dbg_tgt, vha, 0xe031,
		    "ACCEPT_TGT_IO instance %d status %04x "
		    "lun %04x read/write %d data_length %04x "
		    "target_id %02x rx_id %04x\n", vha->vp_idx,
		    le16_to_cpu(atio->u.isp2x.status),
		    le16_to_cpu(atio->u.isp2x.lun),
		    atio->u.isp2x.execution_codes,
		    le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
		    atio), atio->u.isp2x.rx_id);
		if (atio->u.isp2x.status !=
		    __constant_cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe032,
		    "FCP CDB: 0x%02x, sizeof(cdb): %lu",
		    atio->u.isp2x.cdb[0],
		    (unsigned long int)sizeof(atio->u.isp2x.cdb));

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, 0);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
					    "qla_target: Unable to send "
					    "command to target, sending TERM "
					    "EXCHANGE for rsp\n");
					qlt_send_term_exchange(vha, NULL,
					    atio, 1);
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe060,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status\n", vha->vp_idx);
					qlt_send_busy(vha, atio, 0);
				}
			}
		}
	}
	break;

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe033,
		    "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
		    vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
			    (struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: the aborted
					 * exchange was not terminated, i.e.
					 * the response for the aborted command
					 * was sent between the time the abort
					 * request was received and the time it
					 * was processed.
					 * Unfortunately, the firmware has a
					 * silly requirement that all aborted
					 * exchanges must be explicitly
					 * terminated, otherwise it refuses to
					 * send responses for the abort
					 * requests. So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	int login_code;

	ql_dbg(ql_dbg_tgt, vha, 0xe039,
	    "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
	    vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
	    ha->operating_mode, ha->current_topology);

	if (!ha->tgt.tgt_ops)
		return;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
		return;
	}

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */
	tgt->irq_cmd_count++;

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
		else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
		    "qla_target(%d): Async event %#x occurred: "
		    "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;
	}

	tgt->irq_cmd_count--;
}

static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
	fc_port_t *fcport;
	int rc;

	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		return NULL;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);

	fcport->loop_id = loop_id;

	rc = qla2x00_get_port_database(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		kfree(fcport);
		return NULL;
	}

	return fcport;
}

/* Must be called under tgt_mutex */
static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
	uint8_t *s_id)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

retry:
	global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		if ((s_id[0] == 0xFF) &&
		    (s_id[1] == 0xFC)) {
			/*
			 * This is Domain Controller, so it should be
			 * OK to drop SCSI commands from it.
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
			    "Unable to find initiator with S_ID %x:%x:%x",
			    s_id[0], s_id[1], s_id[2]);
		} else
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
			    "qla_target(%d): Unable to find "
			    "initiator with S_ID %x:%x:%x",
			    vha->vp_idx, s_id[0], s_id[1],
			    s_id[2]);
		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport)
		return NULL;

	if (global_resets !=
	    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	kfree(fcport);
	return sess;
}

static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t s_id[3];
	int rc;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	/* Rebuild the wire-order S_ID from the little-endian ABTS header */
	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&ha->tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&ha->tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	if (tgt->tgt_stop)
		goto out_term;

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	if (rc != 0)
		goto out_term;

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);
	return;

out_term:
	qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
}

static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;
	void *iocb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&ha->tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&ha->tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	iocb = a;
	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	if (rc != 0)
		goto out_term;

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);
	return;

out_term:
	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
}

static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at a time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}

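/*
 * Illustrative sketch (not part of the original driver): the drain pattern
 * qlt_sess_work_fn() implements -- claim one entry under the lock, process
 * it with the lock dropped, then re-take the lock for the next entry. The
 * helper and its arguments are hypothetical.
 */
static inline void qlt_example_drain(spinlock_t *lock, struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	while (!list_empty(list)) {
		struct list_head *entry = list->next;

		list_del(entry);	/* claim under the lock */
		spin_unlock_irqrestore(lock, flags);
		/* ... process the entry without the lock held ... */
		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}
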
/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p)", base_vha->host_no, ha);

	BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->sess_list);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	INIT_DELAYED_WORK(&tgt->sess_del_work,
	    (void (*)(struct work_struct *))qlt_del_sess_work_fn);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	spin_lock_init(&tgt->srr_lock);
	INIT_LIST_HEAD(&tgt->srr_ctio_list);
	INIT_LIST_HEAD(&tgt->srr_imm_list);
	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	ha->tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
	    "qla_target(%d): using 64 Bit PCI addressing",
	    base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	return 0;
}

/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!ha->tgt.qla_tgt)
		return 0;

	mutex_lock(&qla_tgt_mutex);
	list_del(&ha->tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(ha->tgt.qla_tgt);

	return 0;
}

static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	int i;

	pr_debug("qla2xxx HW vha->node_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->node_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx HW vha->port_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->port_name[i]);
	pr_debug("\n");

	pr_debug("qla2xxx passed configfs WWPN: ");
	put_unaligned_be64(wwpn, b);
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", b[i]);
	pr_debug("\n");
}

/**
 * qla_tgt_lport_register - register lport with external module
 *
 * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
 * @wwpn: Passed FC target WWPN
 * @callback: lport initialization callback for tcm_qla2xxx code
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 */
int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
	int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (ha->tgt.tgt_ops != NULL)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		/*
		 * Setup passed parameters ahead of invoking callback
		 */
		ha->tgt.tgt_ops = qla_tgt_ops;
		ha->tgt.target_lport_ptr = target_lport_ptr;
		rc = (*callback)(vha);
		if (rc != 0) {
			ha->tgt.tgt_ops = NULL;
			ha->tgt.target_lport_ptr = NULL;
		}
		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);

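/*
 * Illustrative usage sketch (not part of the original driver): how a fabric
 * module such as tcm_qla2xxx might call qlt_lport_register(). The callback
 * and helper names are hypothetical; a real caller populates every method
 * of its struct qla_tgt_func_tmpl before registering.
 */
static inline int qlt_example_lport_cb(struct scsi_qla_host *vha)
{
	/* Bind fabric-private lport state to vha here */
	return 0;
}

static inline int qlt_example_register_lport(struct qla_tgt_func_tmpl *ops,
	u64 wwpn, void *lport)
{
	/* Returns 0 on a WWPN match, -ENODEV when no suitable host exists */
	return qlt_lport_register(ops, wwpn, qlt_example_lport_cb, lport);
}
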
/**
 * qla_tgt_lport_deregister - Deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	ha->tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);

/* Must be called under HW lock */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode |= MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/* Must be called under HW lock */
void qlt_clear_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode &= ~MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/*
 * qla_tgt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qla_tgt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
void
qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	if (!qla_tgt_mode_enabled(vha))
		return;

	mutex_init(&ha->tgt.tgt_mutex);
	mutex_init(&ha->tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
}

void
qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
{
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		if (qla_ini_mode_enabled(vha))
			ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
		else
			ct_req->req.rff_id.fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		ct_req->req.rff_id.fc4_feature = BIT_1;
	}
}

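/*
 * Illustrative sketch (not part of the original driver): the RFF_ID FC-4
 * feature values the logic above produces. The helper name is hypothetical.
 */
static inline uint8_t qlt_example_fc4_feature(bool tgt, bool ini)
{
	if (tgt)
		return ini ? (BIT_0 | BIT_1) : BIT_0;	/* dual 0x3, target 0x1 */
	return ini ? BIT_1 : 0;				/* initiator only 0x2 */
}
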
/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: SCSI driver HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (!qla_tgt_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = ATIO_PROCESSED;
		pkt++;
	}
}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!vha->flags.online)
		return;

	while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);

		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(&reg->atio_q_out, ha->tgt.atio_ring_index);
}

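/*
 * Reset the ATIO queue in/out pointers when the ISP rings are configured.
 * Multiqueue operation is not handled yet; see the FIXME below.
 */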
void
qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg)
{
	struct qla_hw_data *ha = vha->hw;

	/* FIXME: atio_q in/out for ha->mqenable=1..? */
	if (ha->mqenable) {
#if 0
		WRT_REG_DWORD(&reg->isp25mq.atio_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.atio_q_out, 0);
		RD_REG_DWORD(&reg->isp25mq.atio_q_out);
#endif
	} else {
		/* Setup ATIO registers for target mode */
		WRT_REG_DWORD(&reg->isp24.atio_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.atio_q_out, 0);
		RD_REG_DWORD(&reg->isp24.atio_q_out);
	}
}

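/*
 * Adjust the NVRAM-derived firmware options for target mode before firmware
 * initialization.  The original NVRAM values are saved once so they can be
 * restored when target mode is not enabled.
 */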
void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		/* Enable FC tape support */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frame reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}

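/*
 * If a separate target node name has been configured, copy it into the init
 * control block and set BIT_14 of firmware_options_1 so the firmware uses
 * the supplied node name.
 */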
void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}

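/*
 * Classify a response-queue error entry: returns 1 for entry types that
 * belong to target mode, 0 otherwise.
 */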
int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
	struct sts_entry_24xx *pkt)
{
	switch (pkt->entry_type) {
	case ABTS_RECV_24XX:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case NOTIFY_ACK_TYPE:
		return 1;
	default:
		return 0;
	}
}

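/*
 * Adjust the VP config IOCB options for this vport: clear BIT_5 when target
 * mode is enabled, and clear BIT_4 to disable initiator mode when it has
 * not been requested.
 */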
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;
	/* Disable ini mode, if requested */
	if (!qla_ini_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}

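/*
 * Early target-mode setup for the physical (base) host: initialize the
 * target-mode mutexes and leave target mode disabled until it is explicitly
 * enabled.
 */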
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	mutex_init(&ha->tgt.tgt_mutex);
	mutex_init(&ha->tgt.tgt_host_action_mutex);
	qlt_clear_mode(base_vha);
}

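/*
 * Allocate the per-HA target-mode resources: the vport index map and the
 * DMA-coherent ATIO ring.  Returns 0 on success, -ENOMEM on failure.
 */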
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

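/* Free the resources allocated by qlt_mem_alloc(). */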
void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}

/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
		break;
	}
}

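/*
 * Translate the qlini_mode module parameter string into ql2x_ini_mode.
 * Returns false if the string does not match any known mode.
 */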
static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else
		return false;

	return true;
}

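/*
 * Module-load-time target-mode initialization: create the command caches,
 * the management-command mempool and the target workqueue.  Returns 1 when
 * initiator mode is being disabled, 0 on success with initiator mode kept,
 * or a negative errno on failure.
 */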
int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
	    sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
	    NULL);
	if (!qla_tgt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06c,
		    "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		ret = -ENOMEM;
		goto out;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
out:
	kmem_cache_destroy(qla_tgt_cmd_cachep);
	return ret;
}

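/* Module-unload-time teardown of everything set up by qlt_init(). */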
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	kmem_cache_destroy(qla_tgt_cmd_cachep);
}