/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static int sub_api_initialized;

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static void transport_free_dev_tasks(struct se_cmd *cmd);
static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void transport_generic_request_failure(struct se_cmd *);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
			0, NULL);
	if (!se_tmr_req_cache) {
		pr_err("kmem_cache_create() for struct se_tmr_req"
				" failed\n");
		goto out;
	}
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out_free_tmr_req_cache;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!t10_alua_tg_pt_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
			WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_tg_pt_gp_mem_cache;

	return 0;

out_free_tg_pt_gp_mem_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out_free_tmr_req_cache:
	kmem_cache_destroy(se_tmr_req_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_tmr_req_cache);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
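
/*
 * Usage sketch (illustrative): callers grab one counter value per object,
 * along the lines of
 *
 *	sess->sess_index = scsi_get_new_index(SCSI_INDEX_TYPE);
 *
 * where SCSI_INDEX_TYPE is a placeholder for one of the scsi_index_t
 * enumerators defined in target_core_base.h, not a real name.  The spinlock
 * makes the post-increment safe against concurrent callers.
 */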

static void transport_init_queue_obj(struct se_queue_obj *qobj)
{
	atomic_set(&qobj->queue_cnt, 0);
	INIT_LIST_HEAD(&qobj->qobj_list);
	init_waitqueue_head(&qobj->thread_wq);
	spin_lock_init(&qobj->cmd_queue_lock);
}

void transport_subsystem_check_init(void)
{
	int ret;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_stgt");
	if (ret != 0)
		pr_err("Unable to load target_core_stgt\n");

	sub_api_initialized = 1;
	return;
}

struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);

/*
 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active
	 * struct se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O,
	 * e.g. *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
				&se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	spin_lock_bh(&se_tpg->session_lock);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_bh(&se_tpg->session_lock);
}
EXPORT_SYMBOL(transport_register_session);
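
/*
 * Typical fabric module usage (sketch; the tpg/acl lookup step is
 * fabric-specific and the names below are illustrative only):
 *
 *	se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	// ...locate the struct se_node_acl for this initiator...
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_ptr);
 *
 * On connection teardown the fabric calls
 * transport_deregister_session_configfs() followed by
 * transport_deregister_session() (both below).
 */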

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active
	 * struct se_session.
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct se_node_acl *se_nacl;
	unsigned long flags;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
		if (se_nacl->dynamic_node_acl) {
			if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
					se_tpg)) {
				list_del(&se_nacl->acl_list);
				se_tpg->num_node_acls--;
				spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);

				core_tpg_wait_for_nacl_pr_ref(se_nacl);
				core_free_device_list_for_node(se_nacl, se_tpg);
				se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
						se_nacl);
				spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
			}
		}
		spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
	}

	transport_free_session(se_sess);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
}
EXPORT_SYMBOL(transport_deregister_session);

/*
 * Called with cmd->t_state_lock held.
 */
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task;
	unsigned long flags;

	if (!dev)
		return;

	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (task->task_flags & TF_ACTIVE)
			continue;

		spin_lock_irqsave(&dev->execute_task_lock, flags);
		if (task->t_state_active) {
			pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
				cmd->se_tfo->get_task_tag(cmd), dev, task);

			list_del(&task->t_state_list);
			atomic_dec(&cmd->t_task_cdbs_ex_left);
			task->t_state_active = false;
		}
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
	}
}

/* transport_cmd_check_stop():
 *
 * 'transport_off = 1' determines if t_transport_active should be cleared.
 * 'transport_off = 2' determines if task_dev_state should be removed.
 *
 * A non-zero u8 t_state sets cmd->t_state.
 * Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
	struct se_cmd *cmd,
	int transport_off,
	u8 t_state)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if an IOCTL context caller is requesting the stopping of
	 * this command for LUN shutdown purposes.
	 */
	if (atomic_read(&cmd->transport_lun_stop)) {
		pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		atomic_set(&cmd->t_transport_active, 0);
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->transport_lun_stop_comp);
		return 1;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (atomic_read(&cmd->t_transport_stop)) {
		pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the transport_off == 2
		 * handoff to FE.
		 */
		if (transport_off == 2)
			cmd->se_lun = NULL;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->t_transport_stop_comp);
		return 1;
	}
	if (transport_off) {
		atomic_set(&cmd->t_transport_active, 0);
		if (transport_off == 2) {
			transport_all_task_dev_remove_state(cmd);
			/*
			 * Clear struct se_cmd->se_lun before the transport_off == 2
			 * handoff to fabric module.
			 */
			cmd->se_lun = NULL;
			/*
			 * Some fabric modules like tcm_loop can release their
			 * internally allocated I/O reference and struct se_cmd
			 * now.
			 *
			 * Fabric modules are expected to return '1' here if
			 * the se_cmd being passed is released at this point,
			 * or zero if not being released.
			 */
			if (cmd->se_tfo->check_stop_free != NULL) {
				spin_unlock_irqrestore(
					&cmd->t_state_lock, flags);
				return cmd->se_tfo->check_stop_free(cmd);
			}
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		return 0;
	} else if (t_state)
		cmd->t_state = t_state;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return 0;
}
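
/*
 * Summary of the transport_off handling above:
 *
 *	transport_off == 0: only set cmd->t_state (when t_state is non-zero)
 *	transport_off == 1: additionally clear t_transport_active
 *	transport_off == 2: additionally remove the per-device task state and
 *			    clear cmd->se_lun before the handoff back to the
 *			    fabric module (possibly via ->check_stop_free())
 */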

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, 2, 0);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!atomic_read(&cmd->transport_dev_active)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		goto check_lun;
	}
	atomic_set(&cmd->transport_dev_active, 0);
	transport_all_task_dev_remove_state(cmd);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

check_lun:
	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (atomic_read(&cmd->transport_lun_active)) {
		list_del(&cmd->se_lun_node);
		atomic_set(&cmd->transport_lun_active, 0);
#if 0
		pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n",
			cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
#endif
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	if (!cmd->se_tmr_req)
		transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove) {
		transport_remove_cmd_from_queue(cmd);
		transport_put_cmd(cmd);
	}
}

static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
		bool at_head)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = &dev->dev_queue_obj;
	unsigned long flags;

	if (t_state) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		cmd->t_state = t_state;
		atomic_set(&cmd->t_transport_active, 1);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);

	/* If the cmd is already on the list, remove it before we add it */
	if (!list_empty(&cmd->se_queue_node))
		list_del(&cmd->se_queue_node);
	else
		atomic_inc(&qobj->queue_cnt);

	if (at_head)
		list_add(&cmd->se_queue_node, &qobj->qobj_list);
	else
		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
	atomic_set(&cmd->t_transport_queue_active, 1);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	wake_up_interruptible(&qobj->thread_wq);
}

static struct se_cmd *
transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}
	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);

	atomic_set(&cmd->t_transport_queue_active, 0);

	list_del_init(&cmd->se_queue_node);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	return cmd;
}
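
/*
 * Context note: transport_add_cmd_to_queue() and
 * transport_get_cmd_from_queue() form the producer/consumer pair around
 * dev->dev_queue_obj.  Fabric and backend contexts enqueue a command and
 * wake qobj->thread_wq; transport_processing_thread() (forward-declared
 * above) dequeues and dispatches on cmd->t_state.
 */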

static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
{
	struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (!atomic_read(&cmd->t_transport_queue_active)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return;
	}
	atomic_set(&cmd->t_transport_queue_active, 0);
	atomic_dec(&qobj->queue_cnt);
	list_del_init(&cmd->se_queue_node);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	if (atomic_read(&cmd->t_transport_queue_active)) {
		pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
			cmd->se_tfo->get_task_tag(cmd),
			atomic_read(&cmd->t_transport_queue_active));
	}
}

/*
 * Completion function used by TCM subsystem plugins (such as FILEIO)
 * for queueing up response from struct se_subsystem_api->do_task()
 */
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
	struct se_task *task = list_entry(cmd->t_task_list.next,
				struct se_task, t_list);

	if (good) {
		cmd->scsi_status = SAM_STAT_GOOD;
		task->task_scsi_status = GOOD;
	} else {
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_se_cmd->scsi_sense_reason =
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	transport_complete_task(task, good);
}
EXPORT_SYMBOL(transport_complete_sync_cache);
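
/*
 * Backend usage sketch (illustrative): a subsystem plugin such as FILEIO
 * completes an emulated SYNCHRONIZE_CACHE roughly like
 *
 *	ret = vfs_fsync_range(file, start, end, 1);
 *	transport_complete_sync_cache(cmd, ret == 0);
 *
 * i.e. 'good' is non-zero on success and zero to raise CHECK_CONDITION
 * with LOGICAL UNIT COMMUNICATION FAILURE sense.
 */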

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd);
}

/* transport_complete_task():
 *
 * Called from interrupt and non-interrupt context depending
 * on the transport plugin.
 */
void transport_complete_task(struct se_task *task, int success)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	task->task_flags &= ~TF_ACTIVE;

	/*
	 * See if any sense data exists, if so set the TASK_SENSE flag.
	 * Also check for any other post completion work that needs to be
	 * done by the plugins.
	 */
	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(task) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			task->task_flags |= TF_HAS_SENSE;
			success = 1;
		}
	}

	/*
	 * See if we are waiting for outstanding struct se_task
	 * to complete for an exception condition
	 */
	if (task->task_flags & TF_REQUEST_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&task->task_stop_comp);
		return;
	}

	if (!success)
		cmd->t_tasks_failed = 1;

	/*
	 * Decrement the outstanding t_task_cdbs_left count.  The last
	 * struct se_task from struct se_cmd will complete itself into the
	 * device queue depending upon int success.
	 */
	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	if (cmd->t_tasks_failed) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		atomic_set(&cmd->t_transport_complete, 1);
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	atomic_set(&cmd->t_transport_active, 1);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(transport_complete_task);
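
/*
 * Backend completion sketch (illustrative, not verbatim from any plugin):
 * a struct se_subsystem_api->do_task() implementation that finishes
 * synchronously would typically end with
 *
 *	task->task_scsi_status = GOOD;
 *	transport_complete_task(task, 1);
 *	return 0;
 *
 * Asynchronous backends (e.g. IBLOCK from its bio completion handler) make
 * the same transport_complete_task() call from their completion context.
 */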

/*
 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 * struct se_task list is ready to be added to the active execution list
 * of the struct se_device.
 *
 * Called with struct se_device->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	/*
	 * No SAM Task attribute emulation enabled, add to tail of
	 * execution queue
	 */
	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
		return 0;
	}
	/*
	 * HEAD_OF_QUEUE attribute for received CDB, which means
	 * the first task that is associated with a struct se_cmd goes to
	 * head of the struct se_device->execute_task_list, and task_prev
	 * after that for each subsequent task
	 */
	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
		list_add(&task->t_execute_list,
				(task_prev != NULL) ?
				&task_prev->t_execute_list :
				&dev->execute_task_list);
		pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
				" in execution queue\n",
				task->task_se_cmd->t_task_cdb[0]);
		return 1;
	}
	/*
	 * ORDERED, SIMPLE or UNTAGGED attribute tasks are added to the end
	 * of the struct se_device->execute_task_list once they have
	 * transitioned from the Dormant to the Active state.
	 */
	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
	return 0;
}
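
/*
 * Worked example (hypothetical): with SAM attribute emulation enabled and a
 * HEAD_OF_QUEUE command whose se_cmd maps to tasks T0, T1, T2, the execute
 * list becomes T0, T1, T2, <older tasks>: T0 is inserted at the list head
 * (task_prev == NULL) and each subsequent task lands directly after its
 * predecessor via task_prev.
 */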

/* __transport_add_task_to_execute_queue():
 *
 * Called with struct se_device->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	int head_of_queue;

	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
	atomic_inc(&dev->execute_tasks);

	if (task->t_state_active)
		return;
	/*
	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
	 * state list as well.  Running without SAM Task Attribute emulation
	 * will always return head_of_queue == 0 here
	 */
	if (head_of_queue)
		list_add(&task->t_state_list, (task_prev) ?
				&task_prev->t_state_list :
				&dev->state_task_list);
	else
		list_add_tail(&task->t_state_list, &dev->state_task_list);

	task->t_state_active = true;

	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
		task, dev);
}

static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		spin_lock(&dev->execute_task_lock);
		if (!task->t_state_active) {
			list_add_tail(&task->t_state_list,
					&dev->state_task_list);
			task->t_state_active = true;

			pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
				task->task_se_cmd->se_tfo->get_task_tag(
				task->task_se_cmd), task, dev);
		}
		spin_unlock(&dev->execute_task_lock);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task, *task_prev = NULL;

	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (!list_empty(&task->t_execute_list))
			continue;
		/*
		 * __transport_add_task_to_execute_queue() handles the
		 * SAM Task Attribute emulation if enabled
		 */
		__transport_add_task_to_execute_queue(task, task_prev, dev);
		task_prev = task;
	}
}

static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	unsigned long flags;
	struct se_device *dev = cmd->se_dev;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__transport_add_tasks_from_cmd(cmd);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

void __transport_remove_task_from_execute_queue(struct se_task *task,
		struct se_device *dev)
{
	list_del_init(&task->t_execute_list);
	atomic_dec(&dev->execute_tasks);
}

static void transport_remove_task_from_execute_queue(
	struct se_task *task,
	struct se_device *dev)
{
	unsigned long flags;

	if (WARN_ON(list_empty(&task->t_execute_list)))
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__transport_remove_task_from_execute_queue(task, dev);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		smp_mb__after_atomic_dec();

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		transport_add_cmd_to_queue(cmd, cmd->t_state, true);
	}
}
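
/*
 * Context note: commands land on dev->qf_cmd_list via
 * transport_handle_queue_full() (forward-declared above) when a fabric
 * callback such as queue_data_in() or queue_status() returns -EAGAIN or
 * -ENOMEM; the work item above then re-queues them at the head of the
 * device queue so the processing thread can retry delivery.
 */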

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d",
		atomic_read(&dev->execute_tasks), dev->queue_depth);
	*bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
		dev->se_sub_dev->se_dev_attrib.block_size,
		dev->se_sub_dev->se_dev_attrib.max_sectors);
	*bl += sprintf(b + *bl, " ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set.
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association.
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type.
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */
  1057. /*
  1058. * The VPD Code Set (encoding)
  1059. *
  1060. * from spc3r23.pdf Section 7.6.3.1 Table 296
  1061. */
  1062. vpd->device_identifier_code_set = (page_83[0] & 0x0f);
  1063. switch (vpd->device_identifier_code_set) {
  1064. case 0x01: /* Binary */
  1065. vpd->device_identifier[j++] =
  1066. hex_str[vpd->device_identifier_type];
  1067. while (i < (4 + page_83[3])) {
  1068. vpd->device_identifier[j++] =
  1069. hex_str[(page_83[i] & 0xf0) >> 4];
  1070. vpd->device_identifier[j++] =
  1071. hex_str[page_83[i] & 0x0f];
  1072. i++;
  1073. }
  1074. break;
  1075. case 0x02: /* ASCII */
  1076. case 0x03: /* UTF-8 */
  1077. while (i < (4 + page_83[3]))
  1078. vpd->device_identifier[j++] = page_83[i++];
  1079. break;
  1080. default:
  1081. break;
  1082. }
  1083. return transport_dump_vpd_ident(vpd, NULL, 0);
  1084. }
  1085. EXPORT_SYMBOL(transport_set_vpd_ident);
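/*
 * Editor's note -- a worked example of the three transport_set_vpd_*()
 * helpers above, against a hypothetical INQUIRY EVPD 0x83 designation
 * descriptor (layout per spc3r23.pdf Section 7.6.3.1):
 *
 *	page_83[0] = 0x01 -> code set: 0x01 & 0x0f = 0x1 (Binary)
 *	page_83[1] = 0x03 -> association: 0x03 & 0x30 = 0x00 (logical unit),
 *			     identifier type: 0x03 & 0x0f = 0x3 (NAA)
 *	page_83[3] = 0x08 -> 8 identifier bytes, so page_83[4..11] are
 *			     hex-encoded into vpd->device_identifier by the
 *			     Binary case of transport_set_vpd_ident().
 */
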
static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", dev->transport->name,
		dev->transport->get_device_rev(dev));
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug(" Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug(" Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug(" Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug(" Type: %s ", scsi_device_type(device_type));
	pr_debug(" ANSI SCSI revision: %02x\n",
		dev->transport->get_device_rev(dev));
}

struct se_device *transport_add_device_to_core_hba(
	struct se_hba *hba,
	struct se_subsystem_api *transport,
	struct se_subsystem_dev *se_dev,
	u32 device_flags,
	void *transport_dev,
	struct se_dev_limits *dev_limits,
	const char *inquiry_prod,
	const char *inquiry_rev)
{
	int force_pt;
	struct se_device *dev;

	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
	if (!dev) {
		pr_err("Unable to allocate memory for struct se_device\n");
		return NULL;
	}

	transport_init_queue_obj(&dev->dev_queue_obj);
	dev->dev_flags = device_flags;
	dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
	dev->dev_ptr = transport_dev;
	dev->se_hba = hba;
	dev->se_sub_dev = se_dev;
	dev->transport = transport;
	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->execute_task_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_task_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->dev_status_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	atomic_set(&dev->dev_ordered_id, 0);

	se_dev_set_default_attribs(dev, dev_limits);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();
	spin_lock_init(&dev->stats_lock);

	spin_lock(&hba->device_lock);
	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	/*
	 * Setup the SAM Task Attribute emulation for struct se_device
	 */
	core_setup_task_attr_emulation(dev);
	/*
	 * Force PR and ALUA passthrough emulation with internal object use.
	 */
	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
	/*
	 * Setup the Reservations infrastructure for struct se_device
	 */
	core_setup_reservations(dev, force_pt);
	/*
	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
	 */
	if (core_setup_alua(dev, force_pt) < 0)
		goto out;
	/*
	 * Startup the struct se_device processing thread
	 */
	dev->process_thread = kthread_run(transport_processing_thread, dev,
					  "LIO_%s", dev->transport->name);
	if (IS_ERR(dev->process_thread)) {
		pr_err("Unable to create kthread: LIO_%s\n",
			dev->transport->name);
		goto out;
	}
	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 * This is required so that transport_get_inquiry() copies these
	 * originals once back into DEV_T10_WWN(dev) for the virtual device
	 * setup.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (!inquiry_prod || !inquiry_rev) {
			pr_err("All non TCM/pSCSI plugins require"
				" INQUIRY consts\n");
			goto out_stop_thread;
		}

		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
	}
	scsi_dump_inquiry(dev);

	return dev;

out_stop_thread:
	/*
	 * Only stop the processing thread once it has actually been started;
	 * the earlier failure paths jump straight to "out" so kthread_stop()
	 * is never called on a NULL or ERR_PTR() thread pointer.
	 */
	kthread_stop(dev->process_thread);
out:
	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	se_release_vpd_for_dev(dev);

	kfree(dev);

	return NULL;
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);

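/*
 * Editor's sketch of a typical backend caller, for orientation only; the
 * subsystem api template and private pointer names are hypothetical, not
 * defined in this file:
 *
 *	dev = transport_add_device_to_core_hba(hba, &my_subsystem_api,
 *			se_dev, 0, my_backend_priv, &dev_limits,
 *			"MYDISK", "1.0");
 *	if (!dev)
 *		return -ENOMEM;
 *
 * Note that the INQUIRY product/revision consts are mandatory for every
 * backend except TCM/pSCSI passthrough, as enforced above.
 */
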
/*	transport_generic_prepare_cdb():
 *
 *	Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
 *	contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
 *	The point is that since we map iSCSI LUNs to SCSI Target IDs, a
 *	non-zero LUN in the CDB would throw the devices and HBAs for a loop.
 */
static inline void transport_generic_prepare_cdb(
	unsigned char *cdb)
{
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}
}
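
/*
 * Editor's note -- worked example of the masking above, for a hypothetical
 * CDB arriving with the legacy SAM-2 LUN still encoded in byte 1:
 *
 *	cdb[1] = 0xa3	-> LUN 5 in bits 7-5, low five bits 0x03
 *	cdb[1] &= 0x1f	-> 0x03, i.e. bits 7-5 cleared, low bits preserved
 *
 * The listed READ/VERIFY/WRITE_VERIFY/SEND_DIAGNOSTIC opcodes are skipped
 * because their byte 1 high bits carry RDProtect/VRProtect/SELF-TEST fields
 * rather than a LUN.
 */
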
static struct se_task *
transport_generic_get_task(struct se_cmd *cmd,
		enum dma_data_direction data_direction)
{
	struct se_task *task;
	struct se_device *dev = cmd->se_dev;

	task = dev->transport->alloc_task(cmd->t_task_cdb);
	if (!task) {
		pr_err("Unable to allocate struct se_task\n");
		return NULL;
	}

	INIT_LIST_HEAD(&task->t_list);
	INIT_LIST_HEAD(&task->t_execute_list);
	INIT_LIST_HEAD(&task->t_state_list);
	init_completion(&task->task_stop_comp);
	task->task_se_cmd = cmd;
	task->task_data_direction = data_direction;

	return task;
}

static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_lun_node);
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_queue_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->t_task_list);
	init_completion(&cmd->transport_lun_fe_stop_comp);
	init_completion(&cmd->transport_lun_stop_comp);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	spin_lock_init(&cmd->t_state_lock);
	atomic_set(&cmd->transport_dev_active, 1);

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;
}
EXPORT_SYMBOL(transport_init_se_cmd);

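/*
 * Editor's sketch of the intended usage from a fabric module; struct my_io
 * and my_fabric_ops are hypothetical names, not defined in this file:
 *
 *	struct my_io {
 *		struct se_cmd se_cmd;
 *		unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
 *	};
 *
 *	transport_init_se_cmd(&io->se_cmd, &my_fabric_ops, se_sess,
 *			data_length, DMA_FROM_DEVICE, MSG_SIMPLE_TAG,
 *			&io->sense_buf[0]);
 *
 * No locking is needed at this point; the descriptor is not yet visible
 * to the rest of target-core.
 */
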
static int transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 0;

	if (cmd->sam_task_attr == MSG_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return -EINVAL;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
	smp_mb__after_atomic_inc();
	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			cmd->se_dev->transport->name);
	return 0;
}

/*	transport_generic_allocate_tasks():
 *
 *	Called from fabric RX Thread.
 */
int transport_generic_allocate_tasks(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	int ret;

	transport_generic_prepare_cdb(cdb);
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason =
					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			return -ENOMEM;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
	/*
	 * Setup the received CDB based on SCSI defined opcodes and
	 * perform unit attention, persistent reservations and ALUA
	 * checks for virtual device backends.  The cmd->t_task_cdb
	 * pointer is expected to be setup before we reach this point.
	 */
	ret = transport_generic_cmd_sequencer(cmd, cdb);
	if (ret < 0)
		return ret;
	/*
	 * Check for SAM Task Attribute Emulation
	 */
	if (transport_check_alloc_task_attr(cmd) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(transport_generic_allocate_tasks);

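/*
 * Editor's note -- the two size checks above in numbers, assuming the
 * usual definitions from scsi/scsi.h and target_core_base.h:
 * SCSI_MAX_VARLEN_CDB_SIZE is 260 bytes (the "252 + 8" noted above), while
 * sizeof(cmd->__t_task_cdb) is TCM_MAX_COMMAND_SIZE. A plain 10-byte
 * READ_10 therefore uses the preallocated __t_task_cdb[], and only an
 * oversized VARIABLE_LENGTH_CMD takes the kzalloc() path for an extended
 * CDB buffer.
 */
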
/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be called from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	int ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
	 * in existing usage to ensure that outstanding descriptors are handled
	 * correctly during shutdown via transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	atomic_set(&cmd->t_transport_active, 1);

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret < 0)
		transport_generic_request_failure(cmd);

	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_table
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	struct se_portal_group *se_tpg;
	int rc;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0)
		goto out_check_cond;
	/*
	 * Sanitize CDBs via transport_generic_cmd_sequencer() and
	 * allocate the necessary tasks to complete the received CDB+data
	 */
	rc = transport_generic_allocate_tasks(se_cmd, cdb);
	if (rc != 0)
		goto out_check_cond;
	/*
	 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
	 * for immediate execution of READs, otherwise wait for
	 * transport_generic_handle_data() to be called for WRITEs
	 * when fabric has filled the incoming buffer.
	 */
	transport_handle_cdb_direct(se_cmd);
	return 0;

out_check_cond:
	transport_send_check_condition_and_sense(se_cmd,
			se_cmd->scsi_sense_reason, 0);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd);

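/*
 * Editor's sketch of a fabric receive path built on the helper above; the
 * hdr/my_cmd names are hypothetical. Status, including any CHECK CONDITION
 * raised internally, is delivered through the fabric's queue_status()
 * callback rather than the return value:
 *
 *	target_submit_cmd(&my_cmd->se_cmd, se_sess, hdr->cdb,
 *			my_cmd->sense_buf, scsilun_to_int(&hdr->lun),
 *			be32_to_cpu(hdr->data_length), MSG_SIMPLE_TAG,
 *			DMA_FROM_DEVICE, 0);
 *
 * Passing TARGET_SCF_ACK_KREF in @flags takes the extra cmd_kref reference
 * described above for fabrics that acknowledge at the packet level.
 */
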
/*
 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
 * complete setup in TCM process context w/ TFO->new_cmd_map().
 */
int transport_generic_handle_cdb_map(
	struct se_cmd *cmd)
{
	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}

	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_cdb_map);

/*	transport_generic_handle_data():
 *
 *
 */
int transport_generic_handle_data(
	struct se_cmd *cmd)
{
	/*
	 * For the software fabric case, we assume the nexus is being
	 * failed/shutdown when signals are pending from the kthread context
	 * caller, so we return a failure.  For the HW target mode case running
	 * in interrupt code, the signal_pending() check is skipped.
	 */
	if (!in_interrupt() && signal_pending(current))
		return -EPERM;
	/*
	 * If the received CDB has already been ABORTED by the generic
	 * target engine, we now call transport_check_aborted_status()
	 * to queue any delayed TASK_ABORTED status for the received CDB to the
	 * fabric module as we are expecting no further incoming DATA OUT
	 * sequences at this point.
	 */
	if (transport_check_aborted_status(cmd, 1) != 0)
		return 0;

	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_data);

/*	transport_generic_handle_tmr():
 *
 *
 */
int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

/*
 * If the task is active, request it to be stopped and sleep until it
 * has completed.
 */
bool target_stop_task(struct se_task *task, unsigned long *flags)
{
	struct se_cmd *cmd = task->task_se_cmd;
	bool was_active = false;

	if (task->task_flags & TF_ACTIVE) {
		task->task_flags |= TF_REQUEST_STOP;
		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

		pr_debug("Task %p waiting to complete\n", task);
		wait_for_completion(&task->task_stop_comp);
		pr_debug("Task %p stopped successfully\n", task);

		spin_lock_irqsave(&cmd->t_state_lock, *flags);
		atomic_dec(&cmd->t_task_cdbs_left);
		task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
		was_active = true;
	}

	return was_active;
}

static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;
	int ret = 0;

	pr_debug("ITT[0x%08x] - Stopping tasks\n",
		cmd->se_tfo->get_task_tag(cmd));

	/*
	 * No tasks remain in the execution queue
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		pr_debug("Processing task %p\n", task);
		/*
		 * If the struct se_task has not been sent and is not active,
		 * remove the struct se_task from the execution queue.
		 */
		if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
			spin_unlock_irqrestore(&cmd->t_state_lock,
					flags);
			transport_remove_task_from_execute_queue(task,
					cmd->se_dev);

			pr_debug("Task %p removed from execute queue\n", task);
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			continue;
		}

		if (!target_stop_task(task, &flags)) {
			pr_debug("Task %p - did nothing\n", task);
			ret++;
		}
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return ret;
}

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
static void transport_generic_request_failure(struct se_cmd *cmd)
{
	int ret = 0;

	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->t_task_cdb[0]);
	pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
		cmd->se_tfo->get_cmd_state(cmd),
		cmd->t_state, cmd->scsi_sense_reason);
	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
		" t_transport_active: %d t_transport_stop: %d"
		" t_transport_sent: %d\n", cmd->t_task_list_num,
		atomic_read(&cmd->t_task_cdbs_left),
		atomic_read(&cmd->t_task_cdbs_sent),
		atomic_read(&cmd->t_task_cdbs_ex_left),
		atomic_read(&cmd->t_transport_active),
		atomic_read(&cmd->t_transport_stop),
		atomic_read(&cmd->t_transport_sent));

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	switch (cmd->scsi_sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
		break;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status code defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
				cmd->orig_fe_lun, 0x2C,
				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);

		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		goto check_stop;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], cmd->scsi_sense_reason);
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}
	/*
	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
	 * make the call to transport_send_check_condition_and_sense()
	 * directly.  Otherwise expect the fabric to make the call to
	 * transport_send_check_condition_and_sense() after handling
	 * possible unsolicited write data payloads.
	 */
	ret = transport_send_check_condition_and_sense(cmd,
			cmd->scsi_sense_reason, 0);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
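
/*
 * Editor's note -- worked example of the big-endian LBA assembly above.
 * For a hypothetical READ_10 with cdb[2..5] = 0x00 0x00 0x10 0x00,
 * transport_lba_32() returns (0x10 << 8) | 0x00 = 0x1000, i.e. LBA 4096.
 * The 64-bit variants build the two 32-bit halves separately (__v1 being
 * the most significant half) and shift __v1 up by 32 before OR-ing.
 */
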
static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}

/*
 * Called from Fabric Module context from transport_execute_tasks()
 *
 * The return of this function determines if the tasks from struct se_cmd
 * get added to the execution queue in transport_execute_tasks(),
 * or are added to the delayed or ordered lists here.
 */
static inline int transport_execute_task_attr(struct se_cmd *cmd)
{
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 1;
	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
	 * to move the tasks for the passed struct se_cmd to the front of
	 * the execution list.
	 */
	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		pr_debug("Added HEAD_OF_QUEUE for CDB:"
			" 0x%02x, se_ordered_id: %u\n",
			cmd->t_task_cdb[0],
			cmd->se_ordered_id);
		return 1;
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_inc(&cmd->se_dev->dev_ordered_sync);
		smp_mb__after_atomic_inc();

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
			" list, se_ordered_id: %u\n",
			cmd->t_task_cdb[0],
			cmd->se_ordered_id);
		/*
		 * Add ORDERED command to tail of execution queue if
		 * no other older commands exist that need to be
		 * completed first.
		 */
		if (!atomic_read(&cmd->se_dev->simple_cmds))
			return 1;
	} else {
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc(&cmd->se_dev->simple_cmds);
		smp_mb__after_atomic_inc();
	}
	/*
	 * If one or more outstanding ORDERED commands exist for this
	 * struct se_device, the tasks built for the passed struct se_cmd
	 * must stay dormant: add cmd w/ tasks to the delayed cmd queue,
	 * which is drained upon completion of the HEAD_OF_QUEUE or
	 * ORDERED task(s).
	 */
	if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
		spin_lock(&cmd->se_dev->delayed_cmd_lock);
		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
		list_add_tail(&cmd->se_delayed_node,
				&cmd->se_dev->delayed_cmd_list);
		spin_unlock(&cmd->se_dev->delayed_cmd_lock);

		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
			" delayed CMD list, se_ordered_id: %u\n",
			cmd->t_task_cdb[0], cmd->sam_task_attr,
			cmd->se_ordered_id);
		/*
		 * Return zero to let transport_execute_tasks() know
		 * not to add the delayed tasks to the execution list.
		 */
		return 0;
	}
	/*
	 * Otherwise, no ORDERED task attributes exist..
	 */
	return 1;
}
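
/*
 * Editor's summary of the return values above, derived directly from the
 * code paths:
 *
 *	HEAD_OF_QUEUE				-> 1 (execute now)
 *	ORDERED, no simple cmds in flight	-> 1 (execute now)
 *	anything while ORDERED sync is pending	-> 0 (parked on
 *						      delayed_cmd_list)
 *	SIMPLE/UNTAGGED otherwise		-> 1 (execute now)
 */
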
/*
 * Called from fabric module context in transport_generic_new_cmd() and
 * transport_generic_process_write()
 */
static int transport_execute_tasks(struct se_cmd *cmd)
{
	int add_tasks;
	struct se_device *se_dev = cmd->se_dev;
	/*
	 * Call transport_cmd_check_stop() to see if a fabric exception
	 * has occurred that prevents execution.
	 */
	if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
		/*
		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
		 * attribute for the tasks of the received struct se_cmd CDB
		 */
		add_tasks = transport_execute_task_attr(cmd);
		if (!add_tasks)
			goto execute_tasks;
		/*
		 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
		 * adds associated se_tasks while holding dev->execute_task_lock
		 * before I/O dispatch to avoid a double spinlock access.
		 */
		__transport_execute_tasks(se_dev, cmd);
		return 0;
	}

execute_tasks:
	__transport_execute_tasks(se_dev, NULL);
	return 0;
}

/*
 * Called to check the struct se_device tcq depth window, and once open pull
 * struct se_task entries from struct se_device->execute_task_list and
 * dispatch them to the backend.
 *
 * Called from transport_processing_thread()
 */
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
{
	int error;
	struct se_cmd *cmd = NULL;
	struct se_task *task = NULL;
	unsigned long flags;

check_depth:
	spin_lock_irq(&dev->execute_task_lock);
	if (new_cmd != NULL)
		__transport_add_tasks_from_cmd(new_cmd);

	if (list_empty(&dev->execute_task_list)) {
		spin_unlock_irq(&dev->execute_task_lock);
		return 0;
	}
	task = list_first_entry(&dev->execute_task_list,
				struct se_task, t_execute_list);
	__transport_remove_task_from_execute_queue(task, dev);
	spin_unlock_irq(&dev->execute_task_lock);

	cmd = task->task_se_cmd;
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	task->task_flags |= (TF_ACTIVE | TF_SENT);
	atomic_inc(&cmd->t_task_cdbs_sent);

	if (atomic_read(&cmd->t_task_cdbs_sent) ==
	    cmd->t_task_list_num)
		atomic_set(&cmd->t_transport_sent, 1);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (cmd->execute_task)
		error = cmd->execute_task(task);
	else
		error = dev->transport->do_task(task);
	if (error != 0) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		task->task_flags &= ~TF_ACTIVE;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		atomic_set(&cmd->t_transport_sent, 0);
		transport_stop_tasks_for_cmd(cmd);
		transport_generic_request_failure(cmd);
	}

	new_cmd = NULL;
	goto check_depth;
	/* not reached; the loop above only exits via the list_empty() check */
}

static inline u32 transport_get_sectors_6(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 8-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
type_disk:
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 16-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * XXX_10 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 16-bit sector value.
	 */
type_disk:
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * XXX_12 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 32-bit sector value.
	 */
type_disk:
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];

type_disk:
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		(cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		(cdb[30] << 8) + cdb[31];
}

static inline u32 transport_get_size(
	u32 sectors,
	unsigned char *cdb,
	struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		if (cdb[1] & 1) { /* sectors */
			return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
		} else /* bytes */
			return sectors;
	}
#if 0
	pr_debug("Returning block_size: %u, sectors: %u == %u for"
		" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
		dev->se_sub_dev->se_dev_attrib.block_size * sectors,
		dev->transport->name);
#endif
	return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}
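
/*
 * Editor's note -- a quick worked example of transport_get_size() for the
 * common TYPE_DISK case: with a hypothetical block_size attribute of 512
 * and a TRANSFER LENGTH of 8 sectors, the expected payload is
 * 512 * 8 = 4096 bytes. Only TYPE_TAPE with the FIXED bit (cdb[1] & 1)
 * clear treats the count as raw bytes rather than sectors.
 */
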
static void transport_xor_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	int i;
	int count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_mem_bidi_list
	 */
	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg), KM_USER0);
		if (!addr)
			goto out;

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr, KM_USER0);
	}

out:
	kfree(buf);
}
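
/*
 * Editor's note -- a one-byte illustration of step 3) above: if a data-out
 * buffer byte is 0x5a and the corresponding block byte read in step 1) is
 * 0x3c, the data-in buffer receives 0x5a ^ 0x3c = 0x66. The loop above
 * applies exactly this, byte by byte, across the BIDI read scatterlist.
 */
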
/*
 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
 */
static int transport_get_sense_data(struct se_cmd *cmd)
{
	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
	struct se_device *dev = cmd->se_dev;
	struct se_task *task = NULL, *task_tmp;
	unsigned long flags;
	u32 offset = 0;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}

	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		if (!(task->task_flags & TF_HAS_SENSE))
			continue;

		if (!dev->transport->get_sense_buffer) {
			pr_err("dev->transport->get_sense_buffer"
				" is NULL\n");
			continue;
		}

		sense_buffer = dev->transport->get_sense_buffer(task);
		if (!sense_buffer) {
			pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
				" sense buffer for task with sense\n",
				cmd->se_tfo->get_task_tag(cmd), task);
			continue;
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);

		memcpy(&buffer[offset], sense_buffer,
				TRANSPORT_SENSE_BUFFER);
		cmd->scsi_status = task->task_scsi_status;
		/* Automatically padded */
		cmd->scsi_sense_length =
				(TRANSPORT_SENSE_BUFFER + offset);

		pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
			" and sense\n",
			dev->se_hba->hba_id, dev->transport->name,
			cmd->scsi_status);
		return 0;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return -1;
}

static inline long long transport_dev_end_lba(struct se_device *dev)
{
	return dev->transport->get_blocks(dev) + 1;
}

static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors;

	if (dev->transport->get_device_type(dev) != TYPE_DISK)
		return 0;

	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);

	if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
		pr_err("LBA: %llu Sectors: %u exceeds"
			" transport_dev_end_lba(): %llu\n",
			cmd->t_task_lba, sectors,
			transport_dev_end_lba(dev));
		return -EINVAL;
	}

	return 0;
}

static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
{
	/*
	 * Determine if the received WRITE_SAME is used for direct
	 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
	 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
	 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code.
	 */
	int passthrough = (dev->transport->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);

	if (!passthrough) {
		if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
			pr_err("WRITE_SAME PBDATA and LBDATA"
				" bits not supported for Block Discard"
				" Emulation\n");
			return -ENOSYS;
		}
		/*
		 * Currently for the emulated case we only accept
		 * tpws with the UNMAP=1 bit set.
		 */
		if (!(flags[0] & 0x08)) {
			pr_err("WRITE_SAME w/o UNMAP bit not"
				" supported for Block Discard Emulation\n");
			return -ENOSYS;
		}
	}

	return 0;
}
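
/*
 * Editor's note -- the flag bits tested above, per the SBC-3 WRITE SAME
 * CDB byte that callers pass in as flags[0]:
 *
 *	0x08	UNMAP	(required for the emulated discard path)
 *	0x04	PBDATA	(rejected when emulating)
 *	0x02	LBDATA	(rejected when emulating)
 *
 * e.g. flags[0] = 0x08 passes both checks, while flags[0] = 0x0c fails
 * the PBDATA/LBDATA check.
 */
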
/*	transport_generic_cmd_sequencer():
 *
 *	Generic Command Sequencer that should work for most DAS transport
 *	drivers.
 *
 *	Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
 *	RX Thread.
 *
 *	FIXME: Need to support other SCSI OPCODEs here as well.
 */
static int transport_generic_cmd_sequencer(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	int ret = 0, sector_ret = 0, passthrough;
	u32 sectors = 0, size = 0, pr_reg_type = 0;
	u16 service_action;
	u8 alua_ascq = 0;
	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	if (core_scsi3_ua_check(cmd, cdb) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
		return -EINVAL;
	}
	/*
	 * Check status of Asymmetric Logical Unit Assignment port
	 */
	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
	if (ret != 0) {
		/*
		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
		 * The ALUA additional sense code qualifier (ASCQ) is determined
		 * by the ALUA primary or secondary access state..
		 */
		if (ret > 0) {
#if 0
			pr_debug("[%s]: ALUA TG Port not available,"
				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
				cmd->se_tfo->get_fabric_name(), alua_ascq);
#endif
			transport_set_sense_codes(cmd, 0x04, alua_ascq);
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
			return -EINVAL;
		}
		goto out_invalid_cdb_field;
	}
	/*
	 * Check status for SPC-3 Persistent Reservations
	 */
	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
					cmd, cdb, pr_reg_type) != 0) {
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
			return -EBUSY;
		}
		/*
		 * This means the CDB is allowed for the SCSI Initiator port
		 * when said port is *NOT* holding the legacy SPC-2 or
		 * SPC-3 Persistent Reservation.
		 */
	}

	/*
	 * If we operate in passthrough mode we skip most CDB emulation and
	 * instead hand the commands down to the physical SCSI device.
	 */
	passthrough =
		(dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_64(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case XDWRITEREAD_10:
		if ((cmd->data_direction != DMA_TO_DEVICE) ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			goto out_invalid_cdb_field;
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;

		/*
		 * Do not allow BIDI commands for passthrough mode.
		 */
		if (passthrough)
			goto out_unsupported_cdb;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->transport_complete_callback = &transport_xor_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
		service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;
			size = transport_get_size(sectors, cdb, cmd);
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;

			/*
			 * Do not allow BIDI commands for passthrough mode.
			 */
			if (passthrough)
				goto out_unsupported_cdb;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->transport_complete_callback = &transport_xor_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;

			if (sectors)
				size = transport_get_size(1, cdb, cmd);
			else {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
					" supported\n");
				goto out_invalid_cdb_field;
			}

			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

			if (target_check_write_same_discard(&cdb[10], dev) < 0)
				goto out_invalid_cdb_field;
			if (!passthrough)
				cmd->execute_task = target_emulate_write_same;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			goto out_unsupported_cdb;
		}
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_IN from SCC-2 */
			/*
			 * Check for emulated MI_REPORT_TARGET_PGS.
			 */
			if (cdb[1] == MI_REPORT_TARGET_PGS &&
			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
				cmd->execute_task =
					target_emulate_report_target_port_groups;
			}
			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else {
			/* GPCMD_SEND_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SELECT:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SELECT_10:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SENSE:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_modesense;
		break;
	case MODE_SENSE_10:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_modesense;
		break;
	case GPCMD_READ_BUFFER_CAPACITY:
	case GPCMD_SEND_OPC:
	case LOG_SELECT:
	case LOG_SENSE:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_BLOCK_LIMITS:
		size = READ_BLOCK_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case GPCMD_GET_CONFIGURATION:
	case GPCMD_READ_FORMAT_CAPACITIES:
	case GPCMD_READ_DISC_INFO:
	case GPCMD_READ_TRACK_RZONE_INFO:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case PERSISTENT_RESERVE_IN:
		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
			cmd->execute_task = target_scsi3_emulate_pr_in;
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case PERSISTENT_RESERVE_OUT:
		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
			cmd->execute_task = target_scsi3_emulate_pr_out;
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case GPCMD_MECHANISM_STATUS:
	case GPCMD_READ_DVD_STRUCTURE:
		size = (cdb[8] << 8) + cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_POSITION:
		size = READ_POSITION_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_OUT from SCC-2
			 *
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS &&
			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
				cmd->execute_task =
					target_emulate_set_target_port_groups;
			}
			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else {
			/* GPCMD_REPORT_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case INQUIRY:
		size = (cdb[3] << 8) + cdb[4];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_inquiry;
		break;
  2530. case READ_BUFFER:
  2531. size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
  2532. cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
  2533. break;
  2534. case READ_CAPACITY:
  2535. size = READ_CAP_LEN;
  2536. cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
  2537. if (!passthrough)
  2538. cmd->execute_task = target_emulate_readcapacity;
  2539. break;
  2540. case READ_MEDIA_SERIAL_NUMBER:
  2541. case SECURITY_PROTOCOL_IN:
  2542. case SECURITY_PROTOCOL_OUT:
  2543. size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
  2544. cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
  2545. break;
  2546. case SERVICE_ACTION_IN:
  2547. switch (cmd->t_task_cdb[1] & 0x1f) {
  2548. case SAI_READ_CAPACITY_16:
  2549. if (!passthrough)
  2550. cmd->execute_task =
  2551. target_emulate_readcapacity_16;
  2552. break;
  2553. default:
  2554. if (passthrough)
  2555. break;
  2556. pr_err("Unsupported SA: 0x%02x\n",
  2557. cmd->t_task_cdb[1] & 0x1f);
  2558. goto out_unsupported_cdb;
  2559. }
  2560. /*FALLTHROUGH*/
  2561. case ACCESS_CONTROL_IN:
  2562. case ACCESS_CONTROL_OUT:
  2563. case EXTENDED_COPY:
  2564. case READ_ATTRIBUTE:
  2565. case RECEIVE_COPY_RESULTS:
  2566. case WRITE_ATTRIBUTE:
  2567. size = (cdb[10] << 24) | (cdb[11] << 16) |
  2568. (cdb[12] << 8) | cdb[13];
  2569. cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
  2570. break;
  2571. case RECEIVE_DIAGNOSTIC:
  2572. case SEND_DIAGNOSTIC:
  2573. size = (cdb[3] << 8) | cdb[4];
  2574. cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
  2575. break;
  2576. /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
  2577. #if 0
  2578. case GPCMD_READ_CD:
  2579. sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
  2580. size = (2336 * sectors);
  2581. cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
  2582. break;
  2583. #endif
  2584. case READ_TOC:
  2585. size = cdb[8];
  2586. cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
  2587. break;
  2588. case REQUEST_SENSE:
  2589. size = cdb[4];
  2590. cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
  2591. if (!passthrough)
  2592. cmd->execute_task = target_emulate_request_sense;
  2593. break;
  2594. case READ_ELEMENT_STATUS:
2595. size = (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
  2596. cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
  2597. break;
  2598. case WRITE_BUFFER:
  2599. size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
  2600. cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
  2601. break;
  2602. case RESERVE:
  2603. case RESERVE_10:
  2604. /*
  2605. * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
  2606. * Assume the passthrough or $FABRIC_MOD will tell us about it.
  2607. */
  2608. if (cdb[0] == RESERVE_10)
  2609. size = (cdb[7] << 8) | cdb[8];
  2610. else
  2611. size = cmd->data_length;
  2612. /*
  2613. * Setup the legacy emulated handler for SPC-2 and
  2614. * >= SPC-3 compatible reservation handling (CRH=1)
2615. * Otherwise, we assume the underlying SCSI logic is
2616. * running in SPC_PASSTHROUGH, and wants reservation
2617. * emulation disabled.
  2618. */
  2619. if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
  2620. cmd->execute_task = target_scsi2_reservation_reserve;
  2621. cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
  2622. break;
  2623. case RELEASE:
  2624. case RELEASE_10:
  2625. /*
  2626. * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
  2627. * Assume the passthrough or $FABRIC_MOD will tell us about it.
  2628. */
  2629. if (cdb[0] == RELEASE_10)
  2630. size = (cdb[7] << 8) | cdb[8];
  2631. else
  2632. size = cmd->data_length;
  2633. if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
  2634. cmd->execute_task = target_scsi2_reservation_release;
  2635. cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
  2636. break;
  2637. case SYNCHRONIZE_CACHE:
  2638. case 0x91: /* SYNCHRONIZE_CACHE_16: */
  2639. /*
  2640. * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
  2641. */
  2642. if (cdb[0] == SYNCHRONIZE_CACHE) {
  2643. sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
  2644. cmd->t_task_lba = transport_lba_32(cdb);
  2645. } else {
  2646. sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
  2647. cmd->t_task_lba = transport_lba_64(cdb);
  2648. }
  2649. if (sector_ret)
  2650. goto out_unsupported_cdb;
  2651. size = transport_get_size(sectors, cdb, cmd);
  2652. cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
  2653. if (passthrough)
  2654. break;
  2655. /*
2656. * Check to ensure that LBA + Range does not exceed the end of
2657. * the device for IBLOCK and FILEIO ->do_sync_cache() backend calls
  2658. */
  2659. if ((cmd->t_task_lba != 0) || (sectors != 0)) {
  2660. if (transport_cmd_get_valid_sectors(cmd) < 0)
  2661. goto out_invalid_cdb_field;
  2662. }
  2663. cmd->execute_task = target_emulate_synchronize_cache;
  2664. break;
  2665. case UNMAP:
  2666. size = get_unaligned_be16(&cdb[7]);
  2667. cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
  2668. if (!passthrough)
  2669. cmd->execute_task = target_emulate_unmap;
  2670. break;
  2671. case WRITE_SAME_16:
  2672. sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
  2673. if (sector_ret)
  2674. goto out_unsupported_cdb;
  2675. if (sectors)
  2676. size = transport_get_size(1, cdb, cmd);
  2677. else {
  2678. pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
  2679. goto out_invalid_cdb_field;
  2680. }
  2681. cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
  2682. cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
  2683. if (target_check_write_same_discard(&cdb[1], dev) < 0)
  2684. goto out_invalid_cdb_field;
  2685. if (!passthrough)
  2686. cmd->execute_task = target_emulate_write_same;
  2687. break;
  2688. case WRITE_SAME:
  2689. sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
  2690. if (sector_ret)
  2691. goto out_unsupported_cdb;
  2692. if (sectors)
  2693. size = transport_get_size(1, cdb, cmd);
  2694. else {
  2695. pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
  2696. goto out_invalid_cdb_field;
  2697. }
  2698. cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
  2699. cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
  2700. /*
  2701. * Follow sbcr26 with WRITE_SAME (10) and check for the existence
  2702. * of byte 1 bit 3 UNMAP instead of original reserved field
  2703. */
  2704. if (target_check_write_same_discard(&cdb[1], dev) < 0)
  2705. goto out_invalid_cdb_field;
  2706. if (!passthrough)
  2707. cmd->execute_task = target_emulate_write_same;
  2708. break;
  2709. case ALLOW_MEDIUM_REMOVAL:
  2710. case ERASE:
  2711. case REZERO_UNIT:
  2712. case SEEK_10:
  2713. case SPACE:
  2714. case START_STOP:
  2715. case TEST_UNIT_READY:
  2716. case VERIFY:
  2717. case WRITE_FILEMARKS:
  2718. cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
  2719. if (!passthrough)
  2720. cmd->execute_task = target_emulate_noop;
  2721. break;
  2722. case GPCMD_CLOSE_TRACK:
  2723. case INITIALIZE_ELEMENT_STATUS:
  2724. case GPCMD_LOAD_UNLOAD:
  2725. case GPCMD_SET_SPEED:
  2726. case MOVE_MEDIUM:
  2727. cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
  2728. break;
  2729. case REPORT_LUNS:
  2730. cmd->execute_task = target_report_luns;
  2731. size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
  2732. /*
2733. * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
  2734. * See spc4r17 section 5.3
  2735. */
  2736. if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
  2737. cmd->sam_task_attr = MSG_HEAD_TAG;
  2738. cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
  2739. break;
  2740. default:
  2741. pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
  2742. " 0x%02x, sending CHECK_CONDITION.\n",
  2743. cmd->se_tfo->get_fabric_name(), cdb[0]);
  2744. goto out_unsupported_cdb;
  2745. }
  2746. if (size != cmd->data_length) {
  2747. pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
  2748. " %u does not match SCSI CDB Length: %u for SAM Opcode:"
  2749. " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
  2750. cmd->data_length, size, cdb[0]);
  2751. cmd->cmd_spdtl = size;
  2752. if (cmd->data_direction == DMA_TO_DEVICE) {
  2753. pr_err("Rejecting underflow/overflow"
  2754. " WRITE data\n");
  2755. goto out_invalid_cdb_field;
  2756. }
  2757. /*
  2758. * Reject READ_* or WRITE_* with overflow/underflow for
  2759. * type SCF_SCSI_DATA_SG_IO_CDB.
  2760. */
  2761. if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
  2762. pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
  2763. " CDB on non 512-byte sector setup subsystem"
  2764. " plugin: %s\n", dev->transport->name);
  2765. /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
  2766. goto out_invalid_cdb_field;
  2767. }
  2768. if (size > cmd->data_length) {
  2769. cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
  2770. cmd->residual_count = (size - cmd->data_length);
  2771. } else {
  2772. cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
  2773. cmd->residual_count = (cmd->data_length - size);
  2774. }
  2775. cmd->data_length = size;
  2776. }
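/*
 * Worked example of the residual handling above (illustrative numbers):
 * for a READ whose CDB-derived size is 4096 bytes while the fabric
 * reported cmd->data_length of 2048, SCF_OVERFLOW_BIT is set with
 * residual_count = 2048 and data_length is updated to the CDB-derived
 * 4096; in the inverse case SCF_UNDERFLOW_BIT is set instead.
 */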
  2777. /* reject any command that we don't have a handler for */
  2778. if (!(passthrough || cmd->execute_task ||
  2779. (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
  2780. goto out_unsupported_cdb;
  2781. transport_set_supported_SAM_opcode(cmd);
  2782. return ret;
  2783. out_unsupported_cdb:
  2784. cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
  2785. cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
  2786. return -EINVAL;
  2787. out_invalid_cdb_field:
  2788. cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
  2789. cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
  2790. return -EINVAL;
  2791. }
  2792. /*
  2793. * Called from I/O completion to determine which dormant/delayed
  2794. * and ordered cmds need to have their tasks added to the execution queue.
  2795. */
  2796. static void transport_complete_task_attr(struct se_cmd *cmd)
  2797. {
  2798. struct se_device *dev = cmd->se_dev;
  2799. struct se_cmd *cmd_p, *cmd_tmp;
  2800. int new_active_tasks = 0;
  2801. if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
  2802. atomic_dec(&dev->simple_cmds);
  2803. smp_mb__after_atomic_dec();
  2804. dev->dev_cur_ordered_id++;
  2805. pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
  2806. " SIMPLE: %u\n", dev->dev_cur_ordered_id,
  2807. cmd->se_ordered_id);
  2808. } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
  2809. dev->dev_cur_ordered_id++;
  2810. pr_debug("Incremented dev_cur_ordered_id: %u for"
  2811. " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
  2812. cmd->se_ordered_id);
  2813. } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
  2814. atomic_dec(&dev->dev_ordered_sync);
  2815. smp_mb__after_atomic_dec();
  2816. dev->dev_cur_ordered_id++;
  2817. pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
  2818. " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
  2819. }
  2820. /*
  2821. * Process all commands up to the last received
  2822. * ORDERED task attribute which requires another blocking
  2823. * boundary
  2824. */
  2825. spin_lock(&dev->delayed_cmd_lock);
  2826. list_for_each_entry_safe(cmd_p, cmd_tmp,
  2827. &dev->delayed_cmd_list, se_delayed_node) {
  2828. list_del(&cmd_p->se_delayed_node);
  2829. spin_unlock(&dev->delayed_cmd_lock);
  2830. pr_debug("Calling add_tasks() for"
  2831. " cmd_p: 0x%02x Task Attr: 0x%02x"
  2832. " Dormant -> Active, se_ordered_id: %u\n",
  2833. cmd_p->t_task_cdb[0],
  2834. cmd_p->sam_task_attr, cmd_p->se_ordered_id);
  2835. transport_add_tasks_from_cmd(cmd_p);
  2836. new_active_tasks++;
  2837. spin_lock(&dev->delayed_cmd_lock);
  2838. if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
  2839. break;
  2840. }
  2841. spin_unlock(&dev->delayed_cmd_lock);
  2842. /*
  2843. * If new tasks have become active, wake up the transport thread
  2844. * to do the processing of the Active tasks.
  2845. */
  2846. if (new_active_tasks != 0)
  2847. wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
  2848. }
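/*
 * Illustrative timeline for the barrier logic above (assuming task
 * attribute emulation is enabled): an ORDERED command in flight causes
 * later commands to be parked on dev->delayed_cmd_list; once the ORDERED
 * command completes, the loop above re-adds the delayed commands via
 * transport_add_tasks_from_cmd() and wakes the processing thread.
 */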
  2849. static void transport_complete_qf(struct se_cmd *cmd)
  2850. {
  2851. int ret = 0;
  2852. if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
  2853. transport_complete_task_attr(cmd);
  2854. if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
  2855. ret = cmd->se_tfo->queue_status(cmd);
  2856. if (ret)
  2857. goto out;
  2858. }
  2859. switch (cmd->data_direction) {
  2860. case DMA_FROM_DEVICE:
  2861. ret = cmd->se_tfo->queue_data_in(cmd);
  2862. break;
  2863. case DMA_TO_DEVICE:
  2864. if (cmd->t_bidi_data_sg) {
  2865. ret = cmd->se_tfo->queue_data_in(cmd);
  2866. if (ret < 0)
  2867. break;
  2868. }
  2869. /* Fall through for DMA_TO_DEVICE */
  2870. case DMA_NONE:
  2871. ret = cmd->se_tfo->queue_status(cmd);
  2872. break;
  2873. default:
  2874. break;
  2875. }
  2876. out:
  2877. if (ret < 0) {
  2878. transport_handle_queue_full(cmd, cmd->se_dev);
  2879. return;
  2880. }
  2881. transport_lun_remove_cmd(cmd);
  2882. transport_cmd_check_stop_to_fabric(cmd);
  2883. }
  2884. static void transport_handle_queue_full(
  2885. struct se_cmd *cmd,
  2886. struct se_device *dev)
  2887. {
  2888. spin_lock_irq(&dev->qf_cmd_lock);
  2889. list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
  2890. atomic_inc(&dev->dev_qf_count);
  2891. smp_mb__after_atomic_inc();
  2892. spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
  2893. schedule_work(&cmd->se_dev->qf_work_queue);
  2894. }
  2895. static void target_complete_ok_work(struct work_struct *work)
  2896. {
  2897. struct se_cmd *cmd = container_of(work, struct se_cmd, work);
  2898. int reason = 0, ret;
  2899. /*
  2900. * Check if we need to move delayed/dormant tasks from cmds on the
  2901. * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
  2902. * Attribute.
  2903. */
  2904. if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
  2905. transport_complete_task_attr(cmd);
  2906. /*
  2907. * Check to schedule QUEUE_FULL work, or execute an existing
  2908. * cmd->transport_qf_callback()
  2909. */
  2910. if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
  2911. schedule_work(&cmd->se_dev->qf_work_queue);
  2912. /*
  2913. * Check if we need to retrieve a sense buffer from
  2914. * the struct se_cmd in question.
  2915. */
  2916. if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
  2917. if (transport_get_sense_data(cmd) < 0)
  2918. reason = TCM_NON_EXISTENT_LUN;
  2919. /*
2920. * Only set when a struct se_task->task_scsi_status returned
2921. * a non-GOOD status.
  2922. */
  2923. if (cmd->scsi_status) {
  2924. ret = transport_send_check_condition_and_sense(
  2925. cmd, reason, 1);
  2926. if (ret == -EAGAIN || ret == -ENOMEM)
  2927. goto queue_full;
  2928. transport_lun_remove_cmd(cmd);
  2929. transport_cmd_check_stop_to_fabric(cmd);
  2930. return;
  2931. }
  2932. }
  2933. /*
2934. * Check for a callback, used by, amongst other things,
2935. * XDWRITE_READ_10 emulation.
  2936. */
  2937. if (cmd->transport_complete_callback)
  2938. cmd->transport_complete_callback(cmd);
  2939. switch (cmd->data_direction) {
  2940. case DMA_FROM_DEVICE:
  2941. spin_lock(&cmd->se_lun->lun_sep_lock);
  2942. if (cmd->se_lun->lun_sep) {
  2943. cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
  2944. cmd->data_length;
  2945. }
  2946. spin_unlock(&cmd->se_lun->lun_sep_lock);
  2947. ret = cmd->se_tfo->queue_data_in(cmd);
  2948. if (ret == -EAGAIN || ret == -ENOMEM)
  2949. goto queue_full;
  2950. break;
  2951. case DMA_TO_DEVICE:
  2952. spin_lock(&cmd->se_lun->lun_sep_lock);
  2953. if (cmd->se_lun->lun_sep) {
  2954. cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
  2955. cmd->data_length;
  2956. }
  2957. spin_unlock(&cmd->se_lun->lun_sep_lock);
  2958. /*
  2959. * Check if we need to send READ payload for BIDI-COMMAND
  2960. */
  2961. if (cmd->t_bidi_data_sg) {
  2962. spin_lock(&cmd->se_lun->lun_sep_lock);
  2963. if (cmd->se_lun->lun_sep) {
  2964. cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
  2965. cmd->data_length;
  2966. }
  2967. spin_unlock(&cmd->se_lun->lun_sep_lock);
  2968. ret = cmd->se_tfo->queue_data_in(cmd);
  2969. if (ret == -EAGAIN || ret == -ENOMEM)
  2970. goto queue_full;
  2971. break;
  2972. }
  2973. /* Fall through for DMA_TO_DEVICE */
  2974. case DMA_NONE:
  2975. ret = cmd->se_tfo->queue_status(cmd);
  2976. if (ret == -EAGAIN || ret == -ENOMEM)
  2977. goto queue_full;
  2978. break;
  2979. default:
  2980. break;
  2981. }
  2982. transport_lun_remove_cmd(cmd);
  2983. transport_cmd_check_stop_to_fabric(cmd);
  2984. return;
  2985. queue_full:
  2986. pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
  2987. " data_direction: %d\n", cmd, cmd->data_direction);
  2988. cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
  2989. transport_handle_queue_full(cmd, cmd->se_dev);
  2990. }
  2991. static void transport_free_dev_tasks(struct se_cmd *cmd)
  2992. {
  2993. struct se_task *task, *task_tmp;
  2994. unsigned long flags;
  2995. LIST_HEAD(dispose_list);
  2996. spin_lock_irqsave(&cmd->t_state_lock, flags);
  2997. list_for_each_entry_safe(task, task_tmp,
  2998. &cmd->t_task_list, t_list) {
  2999. if (!(task->task_flags & TF_ACTIVE))
  3000. list_move_tail(&task->t_list, &dispose_list);
  3001. }
  3002. spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  3003. while (!list_empty(&dispose_list)) {
  3004. task = list_first_entry(&dispose_list, struct se_task, t_list);
  3005. if (task->task_sg != cmd->t_data_sg &&
  3006. task->task_sg != cmd->t_bidi_data_sg)
  3007. kfree(task->task_sg);
  3008. list_del(&task->t_list);
  3009. cmd->se_dev->transport->free_task(task);
  3010. }
  3011. }
  3012. static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
  3013. {
  3014. struct scatterlist *sg;
  3015. int count;
  3016. for_each_sg(sgl, sg, nents, count)
  3017. __free_page(sg_page(sg));
  3018. kfree(sgl);
  3019. }
  3020. static inline void transport_free_pages(struct se_cmd *cmd)
  3021. {
  3022. if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
  3023. return;
  3024. transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
  3025. cmd->t_data_sg = NULL;
  3026. cmd->t_data_nents = 0;
  3027. transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
  3028. cmd->t_bidi_data_sg = NULL;
  3029. cmd->t_bidi_data_nents = 0;
  3030. }
  3031. /**
  3032. * transport_release_cmd - free a command
  3033. * @cmd: command to free
  3034. *
  3035. * This routine unconditionally frees a command, and reference counting
3036. * or list removal must be done by the caller.
  3037. */
  3038. static void transport_release_cmd(struct se_cmd *cmd)
  3039. {
  3040. BUG_ON(!cmd->se_tfo);
  3041. if (cmd->se_tmr_req)
  3042. core_tmr_release_req(cmd->se_tmr_req);
  3043. if (cmd->t_task_cdb != cmd->__t_task_cdb)
  3044. kfree(cmd->t_task_cdb);
  3045. /*
  3046. * If this cmd has been setup with target_get_sess_cmd(), drop
  3047. * the kref and call ->release_cmd() in kref callback.
  3048. */
  3049. if (cmd->check_release != 0) {
  3050. target_put_sess_cmd(cmd->se_sess, cmd);
  3051. return;
  3052. }
  3053. cmd->se_tfo->release_cmd(cmd);
  3054. }
  3055. /**
  3056. * transport_put_cmd - release a reference to a command
  3057. * @cmd: command to release
  3058. *
  3059. * This routine releases our reference to the command and frees it if possible.
  3060. */
  3061. static void transport_put_cmd(struct se_cmd *cmd)
  3062. {
  3063. unsigned long flags;
  3064. int free_tasks = 0;
  3065. spin_lock_irqsave(&cmd->t_state_lock, flags);
  3066. if (atomic_read(&cmd->t_fe_count)) {
  3067. if (!atomic_dec_and_test(&cmd->t_fe_count))
  3068. goto out_busy;
  3069. }
  3070. if (atomic_read(&cmd->t_se_count)) {
  3071. if (!atomic_dec_and_test(&cmd->t_se_count))
  3072. goto out_busy;
  3073. }
  3074. if (atomic_read(&cmd->transport_dev_active)) {
  3075. atomic_set(&cmd->transport_dev_active, 0);
  3076. transport_all_task_dev_remove_state(cmd);
  3077. free_tasks = 1;
  3078. }
  3079. spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  3080. if (free_tasks != 0)
  3081. transport_free_dev_tasks(cmd);
  3082. transport_free_pages(cmd);
  3083. transport_release_cmd(cmd);
  3084. return;
  3085. out_busy:
  3086. spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  3087. }
  3088. /*
3089. * transport_generic_map_mem_to_cmd - use fabric-allocated pages instead of
3090. * allocating in the core.
3091. * @cmd: Associated se_cmd descriptor
3092. * @sgl: SGL style memory for TCM WRITE / READ
3093. * @sgl_count: Number of SGL elements
3094. * @sgl_bidi: SGL style memory for TCM BIDI READ
3095. * @sgl_bidi_count: Number of BIDI READ SGL elements
3096. *
3097. * Return: nonzero if the cmd was rejected: -ENOMEM or improper usage
3098. * of parameters.
  3099. */
  3100. int transport_generic_map_mem_to_cmd(
  3101. struct se_cmd *cmd,
  3102. struct scatterlist *sgl,
  3103. u32 sgl_count,
  3104. struct scatterlist *sgl_bidi,
  3105. u32 sgl_bidi_count)
  3106. {
  3107. if (!sgl || !sgl_count)
  3108. return 0;
  3109. if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
  3110. (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
  3111. /*
  3112. * Reject SCSI data overflow with map_mem_to_cmd() as incoming
  3113. * scatterlists already have been set to follow what the fabric
  3114. * passes for the original expected data transfer length.
  3115. */
  3116. if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
  3117. pr_warn("Rejecting SCSI DATA overflow for fabric using"
  3118. " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
  3119. cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
  3120. cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
  3121. return -EINVAL;
  3122. }
  3123. cmd->t_data_sg = sgl;
  3124. cmd->t_data_nents = sgl_count;
  3125. if (sgl_bidi && sgl_bidi_count) {
  3126. cmd->t_bidi_data_sg = sgl_bidi;
  3127. cmd->t_bidi_data_nents = sgl_bidi_count;
  3128. }
  3129. cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
  3130. }
  3131. return 0;
  3132. }
  3133. EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
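/*
 * Minimal sketch of a fabric module handing pre-allocated SGLs to the
 * core with transport_generic_map_mem_to_cmd(). my_fabric_submit_data()
 * is hypothetical; only the two calls below follow APIs in this file.
 */
#if 0
static int my_fabric_submit_data(struct se_cmd *se_cmd,
				 struct scatterlist *sgl, u32 sgl_count)
{
	int ret;

	/* Hand ownership of the fabric SGL to the target core */
	ret = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
					       NULL, 0);
	if (ret < 0)
		return ret;
	/* Allocate tasks and start execution (or ->write_pending) */
	return transport_generic_new_cmd(se_cmd);
}
#endif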
  3134. void *transport_kmap_data_sg(struct se_cmd *cmd)
  3135. {
  3136. struct scatterlist *sg = cmd->t_data_sg;
  3137. struct page **pages;
  3138. int i;
  3139. BUG_ON(!sg);
  3140. /*
  3141. * We need to take into account a possible offset here for fabrics like
3142. * tcm_loop, which may be using a contig buffer from the SCSI midlayer for
  3143. * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
  3144. */
  3145. if (!cmd->t_data_nents)
  3146. return NULL;
  3147. else if (cmd->t_data_nents == 1)
  3148. return kmap(sg_page(sg)) + sg->offset;
  3149. /* >1 page. use vmap */
  3150. pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
  3151. if (!pages)
  3152. return NULL;
  3153. /* convert sg[] to pages[] */
  3154. for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
  3155. pages[i] = sg_page(sg);
  3156. }
  3157. cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
  3158. kfree(pages);
  3159. if (!cmd->t_data_vmap)
  3160. return NULL;
  3161. return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
  3162. }
  3163. EXPORT_SYMBOL(transport_kmap_data_sg);
  3164. void transport_kunmap_data_sg(struct se_cmd *cmd)
  3165. {
  3166. if (!cmd->t_data_nents)
  3167. return;
  3168. else if (cmd->t_data_nents == 1)
  3169. kunmap(sg_page(cmd->t_data_sg));
  3170. vunmap(cmd->t_data_vmap);
  3171. cmd->t_data_vmap = NULL;
  3172. }
  3173. EXPORT_SYMBOL(transport_kunmap_data_sg);
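/*
 * Typical kmap/kunmap pairing for the helpers above (illustrative
 * sketch; my_emulate_handler() and the payload byte written are
 * placeholders):
 */
#if 0
static int my_emulate_handler(struct se_cmd *cmd)
{
	unsigned char *buf = transport_kmap_data_sg(cmd);

	if (!buf)
		return -ENOMEM;
	buf[0] = 0x00;		/* fill the response payload here */
	transport_kunmap_data_sg(cmd);
	return 0;
}
#endif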
  3174. static int
  3175. transport_generic_get_mem(struct se_cmd *cmd)
  3176. {
  3177. u32 length = cmd->data_length;
  3178. unsigned int nents;
  3179. struct page *page;
  3180. gfp_t zero_flag;
  3181. int i = 0;
  3182. nents = DIV_ROUND_UP(length, PAGE_SIZE);
  3183. cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
  3184. if (!cmd->t_data_sg)
  3185. return -ENOMEM;
  3186. cmd->t_data_nents = nents;
  3187. sg_init_table(cmd->t_data_sg, nents);
  3188. zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;
  3189. while (length) {
  3190. u32 page_len = min_t(u32, length, PAGE_SIZE);
  3191. page = alloc_page(GFP_KERNEL | zero_flag);
  3192. if (!page)
  3193. goto out;
  3194. sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
  3195. length -= page_len;
  3196. i++;
  3197. }
  3198. return 0;
  3199. out:
3200. while (i > 0) {	/* only entries [0, i) hold allocated pages */
3201. i--;
3202. __free_page(sg_page(&cmd->t_data_sg[i]));
3203. }
  3204. kfree(cmd->t_data_sg);
  3205. cmd->t_data_sg = NULL;
  3206. return -ENOMEM;
  3207. }
  3208. /* Reduce sectors if they are too long for the device */
  3209. static inline sector_t transport_limit_task_sectors(
  3210. struct se_device *dev,
  3211. unsigned long long lba,
  3212. sector_t sectors)
  3213. {
  3214. sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
  3215. if (dev->transport->get_device_type(dev) == TYPE_DISK)
  3216. if ((lba + sectors) > transport_dev_end_lba(dev))
  3217. sectors = ((transport_dev_end_lba(dev) - lba) + 1);
  3218. return sectors;
  3219. }
  3220. /*
  3221. * This function can be used by HW target mode drivers to create a linked
  3222. * scatterlist from all contiguously allocated struct se_task->task_sg[].
  3223. * This is intended to be called during the completion path by TCM Core
  3224. * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
  3225. */
  3226. void transport_do_task_sg_chain(struct se_cmd *cmd)
  3227. {
  3228. struct scatterlist *sg_first = NULL;
  3229. struct scatterlist *sg_prev = NULL;
  3230. int sg_prev_nents = 0;
  3231. struct scatterlist *sg;
  3232. struct se_task *task;
  3233. u32 chained_nents = 0;
  3234. int i;
  3235. BUG_ON(!cmd->se_tfo->task_sg_chaining);
  3236. /*
  3237. * Walk the struct se_task list and setup scatterlist chains
  3238. * for each contiguously allocated struct se_task->task_sg[].
  3239. */
  3240. list_for_each_entry(task, &cmd->t_task_list, t_list) {
  3241. if (!task->task_sg)
  3242. continue;
  3243. if (!sg_first) {
  3244. sg_first = task->task_sg;
  3245. chained_nents = task->task_sg_nents;
  3246. } else {
  3247. sg_chain(sg_prev, sg_prev_nents, task->task_sg);
  3248. chained_nents += task->task_sg_nents;
  3249. }
  3250. /*
  3251. * For the padded tasks, use the extra SGL vector allocated
  3252. * in transport_allocate_data_tasks() for the sg_prev_nents
  3253. * offset into sg_chain() above.
  3254. *
  3255. * We do not need the padding for the last task (or a single
  3256. * task), but in that case we will never use the sg_prev_nents
  3257. * value below which would be incorrect.
  3258. */
  3259. sg_prev_nents = (task->task_sg_nents + 1);
  3260. sg_prev = task->task_sg;
  3261. }
  3262. /*
3263. * Setup the starting pointer and total t_tasks_sg_chained_no including
  3264. * padding SGs for linking and to mark the end.
  3265. */
  3266. cmd->t_tasks_sg_chained = sg_first;
  3267. cmd->t_tasks_sg_chained_no = chained_nents;
  3268. pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
  3269. " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
  3270. cmd->t_tasks_sg_chained_no);
  3271. for_each_sg(cmd->t_tasks_sg_chained, sg,
  3272. cmd->t_tasks_sg_chained_no, i) {
  3273. pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
  3274. i, sg, sg_page(sg), sg->length, sg->offset);
  3275. if (sg_is_chain(sg))
  3276. pr_debug("SG: %p sg_is_chain=1\n", sg);
  3277. if (sg_is_last(sg))
  3278. pr_debug("SG: %p sg_is_last=1\n", sg);
  3279. }
  3280. }
  3281. EXPORT_SYMBOL(transport_do_task_sg_chain);
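/*
 * Sketch of a HW fabric driver consuming the chained SGL built above;
 * my_hw_program_segment() is hypothetical. for_each_sg() walks the
 * chain entries transparently.
 */
#if 0
static void my_hw_map_chained_sgl(struct se_cmd *cmd)
{
	struct scatterlist *sg;
	int i;

	transport_do_task_sg_chain(cmd);
	for_each_sg(cmd->t_tasks_sg_chained, sg,
		    cmd->t_tasks_sg_chained_no, i)
		my_hw_program_segment(sg_page(sg), sg->length, sg->offset);
}
#endif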
  3282. /*
  3283. * Break up cmd into chunks transport can handle
  3284. */
  3285. static int
  3286. transport_allocate_data_tasks(struct se_cmd *cmd,
  3287. enum dma_data_direction data_direction,
  3288. struct scatterlist *cmd_sg, unsigned int sgl_nents)
  3289. {
  3290. struct se_device *dev = cmd->se_dev;
  3291. int task_count, i;
  3292. unsigned long long lba;
  3293. sector_t sectors, dev_max_sectors;
  3294. u32 sector_size;
  3295. if (transport_cmd_get_valid_sectors(cmd) < 0)
  3296. return -EINVAL;
  3297. dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
  3298. sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
  3299. WARN_ON(cmd->data_length % sector_size);
  3300. lba = cmd->t_task_lba;
  3301. sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
  3302. task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
  3303. /*
  3304. * If we need just a single task reuse the SG list in the command
  3305. * and avoid a lot of work.
  3306. */
  3307. if (task_count == 1) {
  3308. struct se_task *task;
  3309. unsigned long flags;
  3310. task = transport_generic_get_task(cmd, data_direction);
  3311. if (!task)
  3312. return -ENOMEM;
  3313. task->task_sg = cmd_sg;
  3314. task->task_sg_nents = sgl_nents;
  3315. task->task_lba = lba;
  3316. task->task_sectors = sectors;
  3317. task->task_size = task->task_sectors * sector_size;
  3318. spin_lock_irqsave(&cmd->t_state_lock, flags);
  3319. list_add_tail(&task->t_list, &cmd->t_task_list);
  3320. spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  3321. return task_count;
  3322. }
  3323. for (i = 0; i < task_count; i++) {
  3324. struct se_task *task;
  3325. unsigned int task_size, task_sg_nents_padded;
  3326. struct scatterlist *sg;
  3327. unsigned long flags;
  3328. int count;
  3329. task = transport_generic_get_task(cmd, data_direction);
  3330. if (!task)
  3331. return -ENOMEM;
  3332. task->task_lba = lba;
  3333. task->task_sectors = min(sectors, dev_max_sectors);
  3334. task->task_size = task->task_sectors * sector_size;
  3335. /*
  3336. * This now assumes that passed sg_ents are in PAGE_SIZE chunks
  3337. * in order to calculate the number per task SGL entries
  3338. */
  3339. task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
  3340. /*
  3341. * Check if the fabric module driver is requesting that all
3342. * struct se_task->task_sg[] be chained together. If so,
  3343. * then allocate an extra padding SG entry for linking and
  3344. * marking the end of the chained SGL for every task except
  3345. * the last one for (task_count > 1) operation, or skipping
  3346. * the extra padding for the (task_count == 1) case.
  3347. */
  3348. if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
  3349. task_sg_nents_padded = (task->task_sg_nents + 1);
  3350. } else
  3351. task_sg_nents_padded = task->task_sg_nents;
  3352. task->task_sg = kmalloc(sizeof(struct scatterlist) *
  3353. task_sg_nents_padded, GFP_KERNEL);
  3354. if (!task->task_sg) {
  3355. cmd->se_dev->transport->free_task(task);
  3356. return -ENOMEM;
  3357. }
  3358. sg_init_table(task->task_sg, task_sg_nents_padded);
  3359. task_size = task->task_size;
  3360. /* Build new sgl, only up to task_size */
  3361. for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
  3362. if (cmd_sg->length > task_size)
  3363. break;
  3364. *sg = *cmd_sg;
  3365. task_size -= cmd_sg->length;
  3366. cmd_sg = sg_next(cmd_sg);
  3367. }
  3368. lba += task->task_sectors;
  3369. sectors -= task->task_sectors;
  3370. spin_lock_irqsave(&cmd->t_state_lock, flags);
  3371. list_add_tail(&task->t_list, &cmd->t_task_list);
  3372. spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  3373. }
  3374. return task_count;
  3375. }
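/*
 * Worked example for the split above (illustrative numbers): with
 * block_size = 512, max_sectors = 1024 and a 1 MiB request, sectors =
 * 2048 and task_count = DIV_ROUND_UP(2048, 1024) = 2, so two se_tasks
 * of 1024 sectors each are carved out of the command SGL.
 */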
  3376. static int
  3377. transport_allocate_control_task(struct se_cmd *cmd)
  3378. {
  3379. struct se_task *task;
  3380. unsigned long flags;
  3381. /* Workaround for handling zero-length control CDBs */
  3382. if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
  3383. !cmd->data_length)
  3384. return 0;
  3385. task = transport_generic_get_task(cmd, cmd->data_direction);
  3386. if (!task)
  3387. return -ENOMEM;
  3388. task->task_sg = cmd->t_data_sg;
  3389. task->task_size = cmd->data_length;
  3390. task->task_sg_nents = cmd->t_data_nents;
  3391. spin_lock_irqsave(&cmd->t_state_lock, flags);
  3392. list_add_tail(&task->t_list, &cmd->t_task_list);
  3393. spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  3394. /* Success! Return number of tasks allocated */
  3395. return 1;
  3396. }
  3397. /*
3398. * Allocate any required resources to execute the command. If possible,
3399. * place it on the execution queue; for writes we might not have the
3400. * payload yet, so notify the fabric via a call to ->write_pending instead.
  3401. */
  3402. int transport_generic_new_cmd(struct se_cmd *cmd)
  3403. {
  3404. struct se_device *dev = cmd->se_dev;
  3405. int task_cdbs, task_cdbs_bidi = 0;
  3406. int set_counts = 1;
  3407. int ret = 0;
  3408. /*
3409. * Determine if the TCM fabric module has already allocated physical
  3410. * memory, and is directly calling transport_generic_map_mem_to_cmd()
  3411. * beforehand.
  3412. */
  3413. if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
  3414. cmd->data_length) {
  3415. ret = transport_generic_get_mem(cmd);
  3416. if (ret < 0)
  3417. goto out_fail;
  3418. }
  3419. /*
  3420. * For BIDI command set up the read tasks first.
  3421. */
  3422. if (cmd->t_bidi_data_sg &&
  3423. dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
  3424. BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
  3425. task_cdbs_bidi = transport_allocate_data_tasks(cmd,
  3426. DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
  3427. cmd->t_bidi_data_nents);
  3428. if (task_cdbs_bidi <= 0)
  3429. goto out_fail;
  3430. atomic_inc(&cmd->t_fe_count);
  3431. atomic_inc(&cmd->t_se_count);
  3432. set_counts = 0;
  3433. }
  3434. if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
  3435. task_cdbs = transport_allocate_data_tasks(cmd,
  3436. cmd->data_direction, cmd->t_data_sg,
  3437. cmd->t_data_nents);
  3438. } else {
  3439. task_cdbs = transport_allocate_control_task(cmd);
  3440. }
  3441. if (task_cdbs < 0)
  3442. goto out_fail;
  3443. else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
  3444. cmd->t_state = TRANSPORT_COMPLETE;
  3445. atomic_set(&cmd->t_transport_active, 1);
  3446. if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
  3447. u8 ua_asc = 0, ua_ascq = 0;
  3448. core_scsi3_ua_clear_for_request_sense(cmd,
  3449. &ua_asc, &ua_ascq);
  3450. }
  3451. INIT_WORK(&cmd->work, target_complete_ok_work);
  3452. queue_work(target_completion_wq, &cmd->work);
  3453. return 0;
  3454. }
  3455. if (set_counts) {
  3456. atomic_inc(&cmd->t_fe_count);
  3457. atomic_inc(&cmd->t_se_count);
  3458. }
  3459. cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
  3460. atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
  3461. atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
  3462. /*
  3463. * For WRITEs, let the fabric know its buffer is ready..
  3464. * This WRITE struct se_cmd (and all of its associated struct se_task's)
  3465. * will be added to the struct se_device execution queue after its WRITE
  3466. * data has arrived. (ie: It gets handled by the transport processing
  3467. * thread a second time)
  3468. */
  3469. if (cmd->data_direction == DMA_TO_DEVICE) {
  3470. transport_add_tasks_to_state_queue(cmd);
  3471. return transport_generic_write_pending(cmd);
  3472. }
  3473. /*
  3474. * Everything else but a WRITE, add the struct se_cmd's struct se_task's
  3475. * to the execution queue.
  3476. */
  3477. transport_execute_tasks(cmd);
  3478. return 0;
  3479. out_fail:
  3480. cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
  3481. cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
  3482. return -EINVAL;
  3483. }
  3484. EXPORT_SYMBOL(transport_generic_new_cmd);
  3485. /* transport_generic_process_write():
  3486. *
  3487. *
  3488. */
  3489. void transport_generic_process_write(struct se_cmd *cmd)
  3490. {
  3491. transport_execute_tasks(cmd);
  3492. }
  3493. EXPORT_SYMBOL(transport_generic_process_write);
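/*
 * Illustrative WRITE flow tying the functions above together (fabric
 * side sketch; my_fabric_recv_data() is hypothetical):
 * transport_generic_new_cmd() returns after ->write_pending(), and once
 * the WRITE payload has arrived the fabric queues execution.
 */
#if 0
static void my_fabric_recv_data(struct se_cmd *se_cmd)
{
	/* WRITE payload has been copied into se_cmd's data SGL */
	transport_generic_process_write(se_cmd);
}
#endif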
  3494. static void transport_write_pending_qf(struct se_cmd *cmd)
  3495. {
  3496. int ret;
  3497. ret = cmd->se_tfo->write_pending(cmd);
  3498. if (ret == -EAGAIN || ret == -ENOMEM) {
  3499. pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
  3500. cmd);
  3501. transport_handle_queue_full(cmd, cmd->se_dev);
  3502. }
  3503. }
  3504. static int transport_generic_write_pending(struct se_cmd *cmd)
  3505. {
  3506. unsigned long flags;
  3507. int ret;
  3508. spin_lock_irqsave(&cmd->t_state_lock, flags);
  3509. cmd->t_state = TRANSPORT_WRITE_PENDING;
  3510. spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  3511. /*
  3512. * Clear the se_cmd for WRITE_PENDING status in order to set
  3513. * cmd->t_transport_active=0 so that transport_generic_handle_data
  3514. * can be called from HW target mode interrupt code. This is safe
  3515. * to be called with transport_off=1 before the cmd->se_tfo->write_pending
  3516. * because the se_cmd->se_lun pointer is not being cleared.
  3517. */
  3518. transport_cmd_check_stop(cmd, 1, 0);
  3519. /*
  3520. * Call the fabric write_pending function here to let the
  3521. * frontend know that WRITE buffers are ready.
  3522. */
  3523. ret = cmd->se_tfo->write_pending(cmd);
  3524. if (ret == -EAGAIN || ret == -ENOMEM)
  3525. goto queue_full;
  3526. else if (ret < 0)
  3527. return ret;
  3528. return 1;
  3529. queue_full:
  3530. pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
  3531. cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
  3532. transport_handle_queue_full(cmd, cmd->se_dev);
  3533. return 0;
  3534. }
  3535. void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
  3536. {
  3537. if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
  3538. if (wait_for_tasks && cmd->se_tmr_req)
  3539. transport_wait_for_tasks(cmd);
  3540. transport_release_cmd(cmd);
  3541. } else {
  3542. if (wait_for_tasks)
  3543. transport_wait_for_tasks(cmd);
  3544. core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
  3545. if (cmd->se_lun)
  3546. transport_lun_remove_cmd(cmd);
  3547. transport_free_dev_tasks(cmd);
  3548. transport_put_cmd(cmd);
  3549. }
  3550. }
  3551. EXPORT_SYMBOL(transport_generic_free_cmd);
  3552. /* target_get_sess_cmd - Add command to active ->sess_cmd_list
  3553. * @se_sess: session to reference
  3554. * @se_cmd: command descriptor to add
  3555. * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
  3556. */
  3557. void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
  3558. bool ack_kref)
  3559. {
  3560. unsigned long flags;
  3561. kref_init(&se_cmd->cmd_kref);
  3562. /*
  3563. * Add a second kref if the fabric caller is expecting to handle
  3564. * fabric acknowledgement that requires two target_put_sess_cmd()
  3565. * invocations before se_cmd descriptor release.
  3566. */
3567. if (ack_kref)
  3568. kref_get(&se_cmd->cmd_kref);
  3569. spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
  3570. list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
  3571. se_cmd->check_release = 1;
  3572. spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
  3573. }
  3574. EXPORT_SYMBOL(target_get_sess_cmd);
  3575. static void target_release_cmd_kref(struct kref *kref)
  3576. {
  3577. struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
  3578. struct se_session *se_sess = se_cmd->se_sess;
  3579. unsigned long flags;
  3580. spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
  3581. if (list_empty(&se_cmd->se_cmd_list)) {
  3582. spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
  3583. WARN_ON(1);
  3584. return;
  3585. }
  3586. if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
  3587. spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
  3588. complete(&se_cmd->cmd_wait_comp);
  3589. return;
  3590. }
  3591. list_del(&se_cmd->se_cmd_list);
  3592. spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
  3593. se_cmd->se_tfo->release_cmd(se_cmd);
  3594. }
  3595. /* target_put_sess_cmd - Check for active I/O shutdown via kref_put
  3596. * @se_sess: session to reference
  3597. * @se_cmd: command descriptor to drop
  3598. */
  3599. int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
  3600. {
  3601. return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
  3602. }
  3603. EXPORT_SYMBOL(target_put_sess_cmd);
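/*
 * Sketch of the ack_kref pattern (hypothetical fabric code): with
 * ack_kref = true the descriptor holds two references, so both the
 * completion path and the fabric ACK path must drop one before
 * ->release_cmd() fires.
 */
#if 0
static void my_fabric_queue_cmd(struct se_session *se_sess,
				struct se_cmd *se_cmd)
{
	/* Two krefs: one for completion, one for the fabric level ACK */
	target_get_sess_cmd(se_sess, se_cmd, true);
}

static void my_fabric_handle_ack(struct se_session *se_sess,
				 struct se_cmd *se_cmd)
{
	/* Final put; target_release_cmd_kref() runs after both puts */
	target_put_sess_cmd(se_sess, se_cmd);
}
#endif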
  3604. /* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
  3605. * @se_sess: session to split
  3606. */
  3607. void target_splice_sess_cmd_list(struct se_session *se_sess)
  3608. {
  3609. struct se_cmd *se_cmd;
  3610. unsigned long flags;
  3611. WARN_ON(!list_empty(&se_sess->sess_wait_list));
  3612. INIT_LIST_HEAD(&se_sess->sess_wait_list);
  3613. spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
  3614. se_sess->sess_tearing_down = 1;
  3615. list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
  3616. list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
  3617. se_cmd->cmd_wait_set = 1;
  3618. spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
  3619. }
  3620. EXPORT_SYMBOL(target_splice_sess_cmd_list);
  3621. /* target_wait_for_sess_cmds - Wait for outstanding descriptors
  3622. * @se_sess: session to wait for active I/O
  3623. * @wait_for_tasks: Make extra transport_wait_for_tasks call
  3624. */
  3625. void target_wait_for_sess_cmds(
  3626. struct se_session *se_sess,
  3627. int wait_for_tasks)
  3628. {
  3629. struct se_cmd *se_cmd, *tmp_cmd;
  3630. bool rc = false;
  3631. list_for_each_entry_safe(se_cmd, tmp_cmd,
  3632. &se_sess->sess_wait_list, se_cmd_list) {
  3633. list_del(&se_cmd->se_cmd_list);
  3634. pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
  3635. " %d\n", se_cmd, se_cmd->t_state,
  3636. se_cmd->se_tfo->get_cmd_state(se_cmd));
  3637. if (wait_for_tasks) {
  3638. pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
  3639. " fabric state: %d\n", se_cmd, se_cmd->t_state,
  3640. se_cmd->se_tfo->get_cmd_state(se_cmd));
  3641. rc = transport_wait_for_tasks(se_cmd);
  3642. pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
  3643. " fabric state: %d\n", se_cmd, se_cmd->t_state,
  3644. se_cmd->se_tfo->get_cmd_state(se_cmd));
  3645. }
  3646. if (!rc) {
  3647. wait_for_completion(&se_cmd->cmd_wait_comp);
  3648. pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
  3649. " fabric state: %d\n", se_cmd, se_cmd->t_state,
  3650. se_cmd->se_tfo->get_cmd_state(se_cmd));
  3651. }
  3652. se_cmd->se_tfo->release_cmd(se_cmd);
  3653. }
  3654. }
  3655. EXPORT_SYMBOL(target_wait_for_sess_cmds);
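/*
 * Typical session shutdown pairing for the two helpers above
 * (illustrative sketch): splice the active list first, then wait for
 * the outstanding descriptors before deregistering the session.
 */
#if 0
static void my_fabric_close_session(struct se_session *se_sess)
{
	target_splice_sess_cmd_list(se_sess);
	target_wait_for_sess_cmds(se_sess, 0);
	transport_deregister_session(se_sess);
}
#endif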
  3656. /* transport_lun_wait_for_tasks():
  3657. *
  3658. * Called from ConfigFS context to stop the passed struct se_cmd to allow
3659. * a struct se_lun to be successfully shut down.
  3660. */
  3661. static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
  3662. {
  3663. unsigned long flags;
  3664. int ret;
  3665. /*
  3666. * If the frontend has already requested this struct se_cmd to
  3667. * be stopped, we can safely ignore this struct se_cmd.
  3668. */
  3669. spin_lock_irqsave(&cmd->t_state_lock, flags);
  3670. if (atomic_read(&cmd->t_transport_stop)) {
  3671. atomic_set(&cmd->transport_lun_stop, 0);
  3672. pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
  3673. " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
  3674. spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  3675. transport_cmd_check_stop(cmd, 1, 0);
  3676. return -EPERM;
  3677. }
  3678. atomic_set(&cmd->transport_lun_fe_stop, 1);
  3679. spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  3680. wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
  3681. ret = transport_stop_tasks_for_cmd(cmd);
  3682. pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
  3683. " %d\n", cmd, cmd->t_task_list_num, ret);
  3684. if (!ret) {
  3685. pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
  3686. cmd->se_tfo->get_task_tag(cmd));
  3687. wait_for_completion(&cmd->transport_lun_stop_comp);
  3688. pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
  3689. cmd->se_tfo->get_task_tag(cmd));
  3690. }
  3691. transport_remove_cmd_from_queue(cmd);
  3692. return 0;
  3693. }
  3694. static void __transport_clear_lun_from_sessions(struct se_lun *lun)
  3695. {
  3696. struct se_cmd *cmd = NULL;
  3697. unsigned long lun_flags, cmd_flags;
  3698. /*
  3699. * Do exception processing and return CHECK_CONDITION status to the
  3700. * Initiator Port.
  3701. */
  3702. spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
  3703. while (!list_empty(&lun->lun_cmd_list)) {
  3704. cmd = list_first_entry(&lun->lun_cmd_list,
  3705. struct se_cmd, se_lun_node);
  3706. list_del(&cmd->se_lun_node);
  3707. atomic_set(&cmd->transport_lun_active, 0);
  3708. /*
  3709. * This will notify iscsi_target_transport.c:
  3710. * transport_cmd_check_stop() that a LUN shutdown is in
  3711. * progress for the iscsi_cmd_t.
  3712. */
  3713. spin_lock(&cmd->t_state_lock);
  3714. pr_debug("SE_LUN[%d] - Setting cmd->transport"
  3715. "_lun_stop for ITT: 0x%08x\n",
  3716. cmd->se_lun->unpacked_lun,
  3717. cmd->se_tfo->get_task_tag(cmd));
  3718. atomic_set(&cmd->transport_lun_stop, 1);
  3719. spin_unlock(&cmd->t_state_lock);
  3720. spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
  3721. if (!cmd->se_lun) {
  3722. pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
  3723. cmd->se_tfo->get_task_tag(cmd),
  3724. cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
  3725. BUG();
  3726. }
  3727. /*
  3728. * If the Storage engine still owns the iscsi_cmd_t, determine
  3729. * and/or stop its context.
  3730. */
  3731. pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
  3732. "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
  3733. cmd->se_tfo->get_task_tag(cmd));
  3734. if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
  3735. spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
  3736. continue;
  3737. }
  3738. pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
  3739. "_wait_for_tasks(): SUCCESS\n",
  3740. cmd->se_lun->unpacked_lun,
  3741. cmd->se_tfo->get_task_tag(cmd));
  3742. spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
  3743. if (!atomic_read(&cmd->transport_dev_active)) {
  3744. spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
  3745. goto check_cond;
  3746. }
  3747. atomic_set(&cmd->transport_dev_active, 0);
  3748. transport_all_task_dev_remove_state(cmd);
  3749. spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
  3750. transport_free_dev_tasks(cmd);
  3751. /*
  3752. * The Storage engine stopped this struct se_cmd before it was
3753. * sent to the fabric frontend for delivery back to the
3754. * Initiator Node. Return this SCSI CDB back with a
3755. * CHECK_CONDITION status.
  3756. */
  3757. check_cond:
  3758. transport_send_check_condition_and_sense(cmd,
  3759. TCM_NON_EXISTENT_LUN, 0);
  3760. /*
  3761. * If the fabric frontend is waiting for this iscsi_cmd_t to
  3762. * be released, notify the waiting thread now that LU has
  3763. * finished accessing it.
  3764. */
  3765. spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
  3766. if (atomic_read(&cmd->transport_lun_fe_stop)) {
  3767. pr_debug("SE_LUN[%d] - Detected FE stop for"
  3768. " struct se_cmd: %p ITT: 0x%08x\n",
  3769. lun->unpacked_lun,
  3770. cmd, cmd->se_tfo->get_task_tag(cmd));
  3771. spin_unlock_irqrestore(&cmd->t_state_lock,
  3772. cmd_flags);
  3773. transport_cmd_check_stop(cmd, 1, 0);
  3774. complete(&cmd->transport_lun_fe_stop_comp);
  3775. spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
  3776. continue;
  3777. }
  3778. pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
  3779. lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
  3780. spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
  3781. spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
  3782. }
  3783. spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
  3784. }
  3785. static int transport_clear_lun_thread(void *p)
  3786. {
  3787. struct se_lun *lun = p;
  3788. __transport_clear_lun_from_sessions(lun);
  3789. complete(&lun->lun_shutdown_comp);
  3790. return 0;
  3791. }
  3792. int transport_clear_lun_from_sessions(struct se_lun *lun)
  3793. {
  3794. struct task_struct *kt;
  3795. kt = kthread_run(transport_clear_lun_thread, lun,
  3796. "tcm_cl_%u", lun->unpacked_lun);
  3797. if (IS_ERR(kt)) {
  3798. pr_err("Unable to start clear_lun thread\n");
  3799. return PTR_ERR(kt);
  3800. }
  3801. wait_for_completion(&lun->lun_shutdown_comp);
  3802. return 0;
  3803. }
  3804. /**
  3805. * transport_wait_for_tasks - wait for completion to occur
  3806. * @cmd: command to wait
  3807. *
  3808. * Called from frontend fabric context to wait for storage engine
  3809. * to pause and/or release frontend generated struct se_cmd.
  3810. */
  3811. bool transport_wait_for_tasks(struct se_cmd *cmd)
  3812. {
  3813. unsigned long flags;
  3814. spin_lock_irqsave(&cmd->t_state_lock, flags);
  3815. if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
  3816. spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  3817. return false;
  3818. }
  3819. /*
  3820. * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
  3821. * has been set in transport_set_supported_SAM_opcode().
  3822. */
  3823. if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
  3824. spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  3825. return false;
  3826. }
  3827. /*
  3828. * If we are already stopped due to an external event (ie: LUN shutdown)
  3829. * sleep until the connection can have the passed struct se_cmd back.
3830. * The cmd->transport_lun_fe_stop_comp will be completed by
3831. * transport_clear_lun_from_sessions() once the ConfigFS context caller
3832. * has completed its operation on the struct se_cmd.
  3833. */
  3834. if (atomic_read(&cmd->transport_lun_stop)) {
  3835. pr_debug("wait_for_tasks: Stopping"
  3836. " wait_for_completion(&cmd->t_tasktransport_lun_fe"
  3837. "_stop_comp); for ITT: 0x%08x\n",
  3838. cmd->se_tfo->get_task_tag(cmd));
  3839. /*
  3840. * There is a special case for WRITES where a FE exception +
  3841. * LUN shutdown means ConfigFS context is still sleeping on
  3842. * transport_lun_stop_comp in transport_lun_wait_for_tasks().
  3843. * We go ahead and up transport_lun_stop_comp just to be sure
  3844. * here.
  3845. */
  3846. spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  3847. complete(&cmd->transport_lun_stop_comp);
  3848. wait_for_completion(&cmd->transport_lun_fe_stop_comp);
  3849. spin_lock_irqsave(&cmd->t_state_lock, flags);
  3850. transport_all_task_dev_remove_state(cmd);
  3851. /*
  3852. * At this point, the frontend who was the originator of this
  3853. * struct se_cmd, now owns the structure and can be released through
  3854. * normal means below.
  3855. */
  3856. pr_debug("wait_for_tasks: Stopped"
  3857. " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
  3858. "stop_comp); for ITT: 0x%08x\n",
  3859. cmd->se_tfo->get_task_tag(cmd));
  3860. atomic_set(&cmd->transport_lun_stop, 0);
  3861. }
  3862. if (!atomic_read(&cmd->t_transport_active) ||
  3863. atomic_read(&cmd->t_transport_aborted)) {
  3864. spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  3865. return false;
  3866. }
  3867. atomic_set(&cmd->t_transport_stop, 1);
  3868. pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
  3869. " i_state: %d, t_state: %d, t_transport_stop = TRUE\n",
  3870. cmd, cmd->se_tfo->get_task_tag(cmd),
  3871. cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
  3872. spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  3873. wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
  3874. wait_for_completion(&cmd->t_transport_stop_comp);
  3875. spin_lock_irqsave(&cmd->t_state_lock, flags);
  3876. atomic_set(&cmd->t_transport_active, 0);
  3877. atomic_set(&cmd->t_transport_stop, 0);
  3878. pr_debug("wait_for_tasks: Stopped wait_for_compltion("
  3879. "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
  3880. cmd->se_tfo->get_task_tag(cmd));
  3881. spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  3882. return true;
  3883. }
  3884. EXPORT_SYMBOL(transport_wait_for_tasks);
  3885. static int transport_get_sense_codes(
  3886. struct se_cmd *cmd,
  3887. u8 *asc,
  3888. u8 *ascq)
  3889. {
  3890. *asc = cmd->scsi_asc;
  3891. *ascq = cmd->scsi_ascq;
  3892. return 0;
  3893. }
  3894. static int transport_set_sense_codes(
  3895. struct se_cmd *cmd,
  3896. u8 asc,
  3897. u8 ascq)
  3898. {
  3899. cmd->scsi_asc = asc;
  3900. cmd->scsi_ascq = ascq;
  3901. return 0;
  3902. }
  3903. int transport_send_check_condition_and_sense(
  3904. struct se_cmd *cmd,
  3905. u8 reason,
  3906. int from_transport)
  3907. {
  3908. unsigned char *buffer = cmd->sense_buffer;
  3909. unsigned long flags;
  3910. int offset;
  3911. u8 asc = 0, ascq = 0;
  3912. spin_lock_irqsave(&cmd->t_state_lock, flags);
  3913. if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
  3914. spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  3915. return 0;
  3916. }
  3917. cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
  3918. spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  3919. if (!reason && from_transport)
  3920. goto after_reason;
  3921. if (!from_transport)
  3922. cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
  3923. /*
  3924. * Data Segment and SenseLength of the fabric response PDU.
  3925. *
  3926. * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
  3927. * from include/scsi/scsi_cmnd.h
  3928. */
  3929. offset = cmd->se_tfo->set_fabric_sense_len(cmd,
  3930. TRANSPORT_SENSE_BUFFER);
  3931. /*
  3932. * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
  3933. * SENSE KEY values from include/scsi/scsi.h
  3934. */
  3935. switch (reason) {
  3936. case TCM_NON_EXISTENT_LUN:
  3937. /* CURRENT ERROR */
  3938. buffer[offset] = 0x70;
  3939. buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
  3940. /* ILLEGAL REQUEST */
  3941. buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
  3942. /* LOGICAL UNIT NOT SUPPORTED */
  3943. buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
  3944. break;
  3945. case TCM_UNSUPPORTED_SCSI_OPCODE:
  3946. case TCM_SECTOR_COUNT_TOO_MANY:
  3947. /* CURRENT ERROR */
  3948. buffer[offset] = 0x70;
  3949. buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
  3950. /* ILLEGAL REQUEST */
  3951. buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
  3952. /* INVALID COMMAND OPERATION CODE */
  3953. buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
  3954. break;
  3955. case TCM_UNKNOWN_MODE_PAGE:
  3956. /* CURRENT ERROR */
  3957. buffer[offset] = 0x70;
  3958. buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
  3959. /* ILLEGAL REQUEST */
  3960. buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
  3961. /* INVALID FIELD IN CDB */
  3962. buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
  3963. break;
  3964. case TCM_CHECK_CONDITION_ABORT_CMD:
  3965. /* CURRENT ERROR */
  3966. buffer[offset] = 0x70;
  3967. buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
  3968. /* ABORTED COMMAND */
  3969. buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
  3970. /* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* DATA PROTECT */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* UNIT ATTENTION */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* Not Ready */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
		break;
	}
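	/*
	 * The cases above all emit SPC-3 "fixed format" sense data.  As a
	 * worked example, TCM_INVALID_CDB_FIELD fills the buffer as follows
	 * (byte positions relative to 'offset', per the fixed format layout
	 * that the SPC_*_OFFSET macros encode; ASCQ assumes the sense buffer
	 * starts out zeroed):
	 *
	 *	byte  0: 0x70	response code (current error, fixed format)
	 *	byte  2: 0x05	sense key: ILLEGAL_REQUEST
	 *	byte  7: 0x0a	additional sense length
	 *	byte 12: 0x24	ASC: INVALID FIELD IN CDB
	 *	byte 13: 0x00	ASCQ
	 */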
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
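
/**
 * transport_check_aborted_status - send delayed SAM_STAT_TASK_ABORTED if owed
 * @cmd: command that may have been aborted
 * @send_status: non-zero if the fabric is ready to queue status now
 *
 * Returns 1 if @cmd has been aborted, in which case the fabric must not
 * transmit further data for it; returns 0 otherwise.  When delayed
 * TASK_ABORTED status is still owed to the initiator and @send_status is
 * set, it is queued here exactly once, guarded by SCF_SENT_DELAYED_TAS.
 *
 * A minimal usage sketch from a fabric module's data-in path, assuming a
 * hypothetical callback my_fabric_queue_data_in() (illustrative only, not
 * a real API):
 *
 *	if (transport_check_aborted_status(cmd, 1) != 0)
 *		return 0;	\/\* aborted: TASK_ABORTED already queued \*\/
 *	return my_fabric_queue_data_in(cmd);
 */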
int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret = 0;

	if (atomic_read(&cmd->t_transport_aborted) != 0) {
		if (!send_status ||
		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
			return 1;
#if 0
		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
			" status for CDB: 0x%02x ITT: 0x%08x\n",
			cmd->t_task_cdb[0],
			cmd->se_tfo->get_task_tag(cmd));
#endif
		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
		cmd->se_tfo->queue_status(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);
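
/**
 * transport_send_task_abort - queue SAM_STAT_TASK_ABORTED back to the fabric
 * @cmd: command being aborted
 *
 * No-op if CHECK_CONDITION status has already been sent for @cmd.  If
 * fabric WRITE data is still expected, delivery is deferred by bumping
 * cmd->t_transport_aborted so that transport_check_aborted_status()
 * queues the status once the remaining data has arrived.
 */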
void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			atomic_inc(&cmd->t_transport_aborted);
			smp_mb__after_atomic_inc();
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
#if 0
	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
		cmd->se_tfo->get_task_tag(cmd));
#endif
	cmd->se_tfo->queue_status(cmd);
}
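
/**
 * transport_generic_do_tmr - dispatch a task management request
 * @cmd: command carrying the struct se_tmr_req to execute
 *
 * Only TMR_LUN_RESET is backed by a real handler (core_tmr_lun_reset());
 * ABORT TASK SET, CLEAR ACA and CLEAR TASK SET report
 * TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED, and every other function is
 * rejected.  The response is queued back to the fabric module via
 * ->queue_tm_rsp().
 */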
static int transport_generic_do_tmr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
			tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
	return 0;
}

/* transport_processing_thread():
 *
 * Per-device processing kthread: sleeps until commands are queued on
 * dev->dev_queue_obj, then dequeues and dispatches each struct se_cmd
 * based on cmd->t_state.
 */
static int transport_processing_thread(void *param)
{
	int ret;
	struct se_cmd *cmd;
	struct se_device *dev = param;

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
				kthread_should_stop());
		if (ret < 0)
			goto out;

get_cmd:
		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
		if (!cmd)
			continue;

		switch (cmd->t_state) {
		case TRANSPORT_NEW_CMD:
			BUG();
			break;
		case TRANSPORT_NEW_CMD_MAP:
			if (!cmd->se_tfo->new_cmd_map) {
				pr_err("cmd->se_tfo->new_cmd_map is"
					" NULL for TRANSPORT_NEW_CMD_MAP\n");
				BUG();
			}
			ret = cmd->se_tfo->new_cmd_map(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			ret = transport_generic_new_cmd(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			break;
		case TRANSPORT_PROCESS_WRITE:
			transport_generic_process_write(cmd);
			break;
		case TRANSPORT_PROCESS_TMR:
			transport_generic_do_tmr(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_WP:
			transport_write_pending_qf(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_OK:
			transport_complete_qf(cmd);
			break;
		default:
			pr_err("Unknown t_state: %d for ITT: 0x%08x "
				"i_state: %d on SE LUN: %u\n",
				cmd->t_state,
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd),
				cmd->se_lun->unpacked_lun);
			BUG();
		}

		goto get_cmd;
	}

out:
	WARN_ON(!list_empty(&dev->state_task_list));
	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
	dev->process_thread = NULL;
	return 0;
}