/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>

#include "target_core_alua.h"
#include "target_core_cdb.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static int sub_api_initialized;

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
                struct se_device *dev);
static void transport_free_dev_tasks(struct se_cmd *cmd);
static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void transport_generic_request_failure(struct se_cmd *, int, int);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
        se_cmd_cache = kmem_cache_create("se_cmd_cache",
                        sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
        if (!se_cmd_cache) {
                pr_err("kmem_cache_create() for struct se_cmd failed\n");
                goto out;
        }
        se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
                        sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
                        0, NULL);
        if (!se_tmr_req_cache) {
                pr_err("kmem_cache_create() for struct se_tmr_req failed\n");
                goto out_free_cmd_cache;
        }
        se_sess_cache = kmem_cache_create("se_sess_cache",
                        sizeof(struct se_session), __alignof__(struct se_session),
                        0, NULL);
        if (!se_sess_cache) {
                pr_err("kmem_cache_create() for struct se_session failed\n");
                goto out_free_tmr_req_cache;
        }
        se_ua_cache = kmem_cache_create("se_ua_cache",
                        sizeof(struct se_ua), __alignof__(struct se_ua),
                        0, NULL);
        if (!se_ua_cache) {
                pr_err("kmem_cache_create() for struct se_ua failed\n");
                goto out_free_sess_cache;
        }
        t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
                        sizeof(struct t10_pr_registration),
                        __alignof__(struct t10_pr_registration), 0, NULL);
        if (!t10_pr_reg_cache) {
                pr_err("kmem_cache_create() for struct t10_pr_registration failed\n");
                goto out_free_ua_cache;
        }
        t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
                        sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
                        0, NULL);
        if (!t10_alua_lu_gp_cache) {
                pr_err("kmem_cache_create() for t10_alua_lu_gp_cache failed\n");
                goto out_free_pr_reg_cache;
        }
        t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
                        sizeof(struct t10_alua_lu_gp_member),
                        __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
        if (!t10_alua_lu_gp_mem_cache) {
                pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_cache failed\n");
                goto out_free_lu_gp_cache;
        }
        t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
                        sizeof(struct t10_alua_tg_pt_gp),
                        __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
        if (!t10_alua_tg_pt_gp_cache) {
                pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_cache failed\n");
                goto out_free_lu_gp_mem_cache;
        }
        t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
                        "t10_alua_tg_pt_gp_mem_cache",
                        sizeof(struct t10_alua_tg_pt_gp_member),
                        __alignof__(struct t10_alua_tg_pt_gp_member),
                        0, NULL);
        if (!t10_alua_tg_pt_gp_mem_cache) {
                pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_mem_cache failed\n");
                goto out_free_tg_pt_gp_cache;
        }

        target_completion_wq = alloc_workqueue("target_completion",
                        WQ_MEM_RECLAIM, 0);
        if (!target_completion_wq)
                goto out_free_tg_pt_gp_mem_cache;

        return 0;

out_free_tg_pt_gp_mem_cache:
        kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
        kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
        kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
        kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
        kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
        kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
        kmem_cache_destroy(se_sess_cache);
out_free_tmr_req_cache:
        kmem_cache_destroy(se_tmr_req_cache);
out_free_cmd_cache:
        kmem_cache_destroy(se_cmd_cache);
out:
        return -ENOMEM;
}

void release_se_kmem_caches(void)
{
        destroy_workqueue(target_completion_wq);
        kmem_cache_destroy(se_cmd_cache);
        kmem_cache_destroy(se_tmr_req_cache);
        kmem_cache_destroy(se_sess_cache);
        kmem_cache_destroy(se_ua_cache);
        kmem_cache_destroy(t10_pr_reg_cache);
        kmem_cache_destroy(t10_alua_lu_gp_cache);
        kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
        kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
        kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
        u32 new_index;

        BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

        spin_lock(&scsi_mib_index_lock);
        new_index = ++scsi_mib_index[type];
        spin_unlock(&scsi_mib_index_lock);

        return new_index;
}
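
/*
 * Usage sketch (illustrative, not a call site in this file): a caller
 * needing a unique statistics index would do, e.g.:
 *
 *      u32 dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 *
 * where SCSI_DEVICE_INDEX is one of the scsi_index_t values from
 * target_core_base.h.  Each per-type counter only ever increments under
 * scsi_mib_index_lock, so indexes are unique but never recycled.
 */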
void transport_init_queue_obj(struct se_queue_obj *qobj)
{
        atomic_set(&qobj->queue_cnt, 0);
        INIT_LIST_HEAD(&qobj->qobj_list);
        init_waitqueue_head(&qobj->thread_wq);
        spin_lock_init(&qobj->cmd_queue_lock);
}
EXPORT_SYMBOL(transport_init_queue_obj);

void transport_subsystem_check_init(void)
{
        int ret;

        if (sub_api_initialized)
                return;

        ret = request_module("target_core_iblock");
        if (ret != 0)
                pr_err("Unable to load target_core_iblock\n");

        ret = request_module("target_core_file");
        if (ret != 0)
                pr_err("Unable to load target_core_file\n");

        ret = request_module("target_core_pscsi");
        if (ret != 0)
                pr_err("Unable to load target_core_pscsi\n");

        ret = request_module("target_core_stgt");
        if (ret != 0)
                pr_err("Unable to load target_core_stgt\n");

        sub_api_initialized = 1;
        return;
}

struct se_session *transport_init_session(void)
{
        struct se_session *se_sess;

        se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
        if (!se_sess) {
                pr_err("Unable to allocate struct se_session from se_sess_cache\n");
                return ERR_PTR(-ENOMEM);
        }
        INIT_LIST_HEAD(&se_sess->sess_list);
        INIT_LIST_HEAD(&se_sess->sess_acl_list);
        INIT_LIST_HEAD(&se_sess->sess_cmd_list);
        INIT_LIST_HEAD(&se_sess->sess_wait_list);
        spin_lock_init(&se_sess->sess_cmd_lock);

        return se_sess;
}
EXPORT_SYMBOL(transport_init_session);

/*
 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
        struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl,
        struct se_session *se_sess,
        void *fabric_sess_ptr)
{
        unsigned char buf[PR_REG_ISID_LEN];

        se_sess->se_tpg = se_tpg;
        se_sess->fabric_sess_ptr = fabric_sess_ptr;
        /*
         * Used by struct se_node_acl's under ConfigFS to locate active
         * struct se_session pointers.
         *
         * Only set for struct se_session's that will actually be moving I/O,
         * e.g. *NOT* discovery sessions.
         */
        if (se_nacl) {
                /*
                 * If the fabric module supports an ISID based TransportID,
                 * save this value in binary from the fabric I_T Nexus now.
                 */
                if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
                        memset(&buf[0], 0, PR_REG_ISID_LEN);
                        se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
                                        &buf[0], PR_REG_ISID_LEN);
                        se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
                }
                spin_lock_irq(&se_nacl->nacl_sess_lock);
                /*
                 * The se_nacl->nacl_sess pointer will be set to the
                 * last active I_T Nexus for each struct se_node_acl.
                 */
                se_nacl->nacl_sess = se_sess;

                list_add_tail(&se_sess->sess_acl_list,
                                &se_nacl->acl_sess_list);
                spin_unlock_irq(&se_nacl->nacl_sess_lock);
        }
        list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

        pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
                se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
        struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl,
        struct se_session *se_sess,
        void *fabric_sess_ptr)
{
        spin_lock_bh(&se_tpg->session_lock);
        __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
        spin_unlock_bh(&se_tpg->session_lock);
}
EXPORT_SYMBOL(transport_register_session);
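
/*
 * A minimal fabric-module usage sketch (illustrative; the fabric-side
 * session pointer and error handling are assumptions of the example):
 *
 *      struct se_session *se_sess = transport_init_session();
 *      if (IS_ERR(se_sess))
 *              return PTR_ERR(se_sess);
 *      transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess);
 *
 * transport_register_session() takes se_tpg->session_lock itself;
 * __transport_register_session() is for callers that already hold it.
 */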
void transport_deregister_session_configfs(struct se_session *se_sess)
{
        struct se_node_acl *se_nacl;
        unsigned long flags;
        /*
         * Used by struct se_node_acl's under ConfigFS to locate active
         * struct se_session pointers.
         */
        se_nacl = se_sess->se_node_acl;
        if (se_nacl) {
                spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
                list_del(&se_sess->sess_acl_list);
                /*
                 * If the session list is empty, then clear the pointer.
                 * Otherwise, set the struct se_session pointer from the tail
                 * element of the per struct se_node_acl active session list.
                 */
                if (list_empty(&se_nacl->acl_sess_list))
                        se_nacl->nacl_sess = NULL;
                else {
                        se_nacl->nacl_sess = container_of(
                                        se_nacl->acl_sess_list.prev,
                                        struct se_session, sess_acl_list);
                }
                spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
        }
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
        kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
        struct se_portal_group *se_tpg = se_sess->se_tpg;
        struct se_node_acl *se_nacl;
        unsigned long flags;

        if (!se_tpg) {
                transport_free_session(se_sess);
                return;
        }

        spin_lock_irqsave(&se_tpg->session_lock, flags);
        list_del(&se_sess->sess_list);
        se_sess->se_tpg = NULL;
        se_sess->fabric_sess_ptr = NULL;
        spin_unlock_irqrestore(&se_tpg->session_lock, flags);

        /*
         * Determine if we need to do extra work for this initiator node's
         * struct se_node_acl if it had been previously dynamically generated.
         */
        se_nacl = se_sess->se_node_acl;
        if (se_nacl) {
                spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
                if (se_nacl->dynamic_node_acl) {
                        if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
                                        se_tpg)) {
                                list_del(&se_nacl->acl_list);
                                se_tpg->num_node_acls--;
                                spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);

                                core_tpg_wait_for_nacl_pr_ref(se_nacl);
                                core_free_device_list_for_node(se_nacl, se_tpg);
                                se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
                                                se_nacl);
                                spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
                        }
                }
                spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
        }

        transport_free_session(se_sess);

        pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
                se_tpg->se_tpg_tfo->get_fabric_name());
}
EXPORT_SYMBOL(transport_deregister_session);

/*
 * Called with cmd->t_state_lock held.
 */
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct se_task *task;
        unsigned long flags;

        if (!dev)
                return;

        list_for_each_entry(task, &cmd->t_task_list, t_list) {
                if (task->task_flags & TF_ACTIVE)
                        continue;

                if (!atomic_read(&task->task_state_active))
                        continue;

                spin_lock_irqsave(&dev->execute_task_lock, flags);
                list_del(&task->t_state_list);
                pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
                        cmd->se_tfo->get_task_tag(cmd), dev, task);
                spin_unlock_irqrestore(&dev->execute_task_lock, flags);

                atomic_set(&task->task_state_active, 0);
                atomic_dec(&cmd->t_task_cdbs_ex_left);
        }
}

/* transport_cmd_check_stop():
 *
 * 'transport_off = 1' determines if t_transport_active should be cleared.
 * 'transport_off = 2' determines if task_dev_state should be removed.
 *
 * A non-zero u8 t_state sets cmd->t_state.
 * Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
        struct se_cmd *cmd,
        int transport_off,
        u8 t_state)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        /*
         * Determine if IOCTL context caller is requesting the stopping of this
         * command for LUN shutdown purposes.
         */
        if (atomic_read(&cmd->transport_lun_stop)) {
                pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
                        " == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
                        cmd->se_tfo->get_task_tag(cmd));

                atomic_set(&cmd->t_transport_active, 0);
                if (transport_off == 2)
                        transport_all_task_dev_remove_state(cmd);
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);

                complete(&cmd->transport_lun_stop_comp);
                return 1;
        }
        /*
         * Determine if frontend context caller is requesting the stopping of
         * this command for frontend exceptions.
         */
        if (atomic_read(&cmd->t_transport_stop)) {
                pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
                        " TRUE for ITT: 0x%08x\n", __func__, __LINE__,
                        cmd->se_tfo->get_task_tag(cmd));

                if (transport_off == 2)
                        transport_all_task_dev_remove_state(cmd);

                /*
                 * Clear struct se_cmd->se_lun before the transport_off == 2
                 * handoff to FE.
                 */
                if (transport_off == 2)
                        cmd->se_lun = NULL;
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);

                complete(&cmd->t_transport_stop_comp);
                return 1;
        }
        if (transport_off) {
                atomic_set(&cmd->t_transport_active, 0);
                if (transport_off == 2) {
                        transport_all_task_dev_remove_state(cmd);
                        /*
                         * Clear struct se_cmd->se_lun before the transport_off == 2
                         * handoff to fabric module.
                         */
                        cmd->se_lun = NULL;
                        /*
                         * Some fabric modules like tcm_loop can release their
                         * internally allocated I/O reference and struct se_cmd now.
                         *
                         * Fabric modules are expected to return '1' here if the
                         * se_cmd being passed is released at this point,
                         * or zero if not being released.
                         */
                        if (cmd->se_tfo->check_stop_free != NULL) {
                                spin_unlock_irqrestore(
                                        &cmd->t_state_lock, flags);
                                return cmd->se_tfo->check_stop_free(cmd);
                        }
                }
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);

                return 0;
        } else if (t_state)
                cmd->t_state = t_state;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
        return transport_cmd_check_stop(cmd, 2, 0);
}
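
/*
 * Reading of the transport_off encoding above (illustrative, not an
 * additional API): the fabric handoff wrapper passes (cmd, 2, 0), i.e.
 * clear t_transport_active, drop the device task state, clear
 * cmd->se_lun, and leave cmd->t_state untouched.  A caller passing
 * transport_off = 0 with a non-zero t_state instead just advances the
 * command state machine.
 */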
static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
        struct se_lun *lun = cmd->se_lun;
        unsigned long flags;

        if (!lun)
                return;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (!atomic_read(&cmd->transport_dev_active)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                goto check_lun;
        }
        atomic_set(&cmd->transport_dev_active, 0);
        transport_all_task_dev_remove_state(cmd);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

check_lun:
        spin_lock_irqsave(&lun->lun_cmd_lock, flags);
        if (atomic_read(&cmd->transport_lun_active)) {
                list_del(&cmd->se_lun_node);
                atomic_set(&cmd->transport_lun_active, 0);
#if 0
                pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n",
                        cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
#endif
        }
        spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
        if (!cmd->se_tmr_req)
                transport_lun_remove_cmd(cmd);

        if (transport_cmd_check_stop_to_fabric(cmd))
                return;
        if (remove) {
                transport_remove_cmd_from_queue(cmd);
                transport_put_cmd(cmd);
        }
}

static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
                bool at_head)
{
        struct se_device *dev = cmd->se_dev;
        struct se_queue_obj *qobj = &dev->dev_queue_obj;
        unsigned long flags;

        if (t_state) {
                spin_lock_irqsave(&cmd->t_state_lock, flags);
                cmd->t_state = t_state;
                atomic_set(&cmd->t_transport_active, 1);
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
        }

        spin_lock_irqsave(&qobj->cmd_queue_lock, flags);

        /* If the cmd is already on the list, remove it before we add it */
        if (!list_empty(&cmd->se_queue_node))
                list_del(&cmd->se_queue_node);
        else
                atomic_inc(&qobj->queue_cnt);

        if (at_head)
                list_add(&cmd->se_queue_node, &qobj->qobj_list);
        else
                list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
        atomic_set(&cmd->t_transport_queue_active, 1);
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

        wake_up_interruptible(&qobj->thread_wq);
}
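
/*
 * Producer side of the per-device command queue: the consumer is
 * transport_processing_thread() (forward-declared at the top of this
 * file), which sleeps on qobj->thread_wq and drains commands via
 * transport_get_cmd_from_queue() below.
 */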
static struct se_cmd *
transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
        struct se_cmd *cmd;
        unsigned long flags;

        spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
        if (list_empty(&qobj->qobj_list)) {
                spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
                return NULL;
        }
        cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);

        atomic_set(&cmd->t_transport_queue_active, 0);

        list_del_init(&cmd->se_queue_node);
        atomic_dec(&qobj->queue_cnt);
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

        return cmd;
}

static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
{
        struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
        unsigned long flags;

        spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
        if (!atomic_read(&cmd->t_transport_queue_active)) {
                spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
                return;
        }
        atomic_set(&cmd->t_transport_queue_active, 0);
        atomic_dec(&qobj->queue_cnt);
        list_del_init(&cmd->se_queue_node);
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

        if (atomic_read(&cmd->t_transport_queue_active)) {
                pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
                        cmd->se_tfo->get_task_tag(cmd),
                        atomic_read(&cmd->t_transport_queue_active));
        }
}

/*
 * Completion function used by TCM subsystem plugins (such as FILEIO)
 * for queueing up response from struct se_subsystem_api->do_task()
 */
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
        struct se_task *task = list_entry(cmd->t_task_list.next,
                        struct se_task, t_list);

        if (good) {
                cmd->scsi_status = SAM_STAT_GOOD;
                task->task_scsi_status = GOOD;
        } else {
                task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
                task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
                task->task_se_cmd->transport_error_status =
                        PYX_TRANSPORT_ILLEGAL_REQUEST;
        }

        transport_complete_task(task, good);
}
EXPORT_SYMBOL(transport_complete_sync_cache);
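
/*
 * Caller sketch, assuming a subsystem plugin's SYNCHRONIZE_CACHE
 * emulation (the vfs_fsync_range() call is illustrative of the FILEIO
 * style, not a quote of it):
 *
 *      ret = vfs_fsync_range(file, start, end, 1);
 *      transport_complete_sync_cache(cmd, ret == 0);
 *
 * On !good the command completes as CHECK CONDITION with the
 * ILLEGAL REQUEST transport error status set up above.
 */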
static void target_complete_failure_work(struct work_struct *work)
{
        struct se_cmd *cmd = container_of(work, struct se_cmd, work);

        transport_generic_request_failure(cmd, 1, 1);
}

/* transport_complete_task():
 *
 * Called from interrupt and non-interrupt context depending
 * on the transport plugin.
 */
void transport_complete_task(struct se_task *task, int success)
{
        struct se_cmd *cmd = task->task_se_cmd;
        struct se_device *dev = cmd->se_dev;
        unsigned long flags;
#if 0
        pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
                        cmd->t_task_cdb[0], dev);
#endif
        if (dev)
                atomic_inc(&dev->depth_left);

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        task->task_flags &= ~TF_ACTIVE;

        /*
         * See if any sense data exists, if so set the TASK_SENSE flag.
         * Also check for any other post completion work that needs to be
         * done by the plugins.
         */
        if (dev && dev->transport->transport_complete) {
                if (dev->transport->transport_complete(task) != 0) {
                        cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
                        task->task_sense = 1;
                        success = 1;
                }
        }

        /*
         * See if we are waiting for outstanding struct se_task
         * to complete for an exception condition
         */
        if (task->task_flags & TF_REQUEST_STOP) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                complete(&task->task_stop_comp);
                return;
        }

        if (!success)
                cmd->t_tasks_failed = 1;

        /*
         * Decrement the outstanding t_task_cdbs_left count.  The last
         * struct se_task from struct se_cmd will complete itself into the
         * device queue depending upon int success.
         */
        if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return;
        }

        if (cmd->t_tasks_failed) {
                if (!task->task_error_status) {
                        task->task_error_status =
                                PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
                        cmd->transport_error_status =
                                PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
                }
                INIT_WORK(&cmd->work, target_complete_failure_work);
        } else {
                atomic_set(&cmd->t_transport_complete, 1);
                INIT_WORK(&cmd->work, target_complete_ok_work);
        }

        cmd->t_state = TRANSPORT_COMPLETE;
        atomic_set(&cmd->t_transport_active, 1);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(transport_complete_task);
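
/*
 * Note on the flow above: every task decrements t_task_cdbs_left, but
 * only the final one queues cmd->work on target_completion_wq, so
 * target_complete_ok_work()/target_complete_failure_work() run exactly
 * once per command, in process context, even when the plugin completed
 * its tasks from interrupt context.
 */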
/*
 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 * struct se_task list is ready to be added to the active execution list
 * of a struct se_device.
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
        struct se_task *task,
        struct se_task *task_prev,
        struct se_device *dev)
{
        /*
         * No SAM Task attribute emulation enabled, add to tail of
         * execution queue
         */
        if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
                list_add_tail(&task->t_execute_list, &dev->execute_task_list);
                return 0;
        }
        /*
         * HEAD_OF_QUEUE attribute for received CDB, which means
         * the first task that is associated with a struct se_cmd goes to
         * head of the struct se_device->execute_task_list, and task_prev
         * after that for each subsequent task
         */
        if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
                list_add(&task->t_execute_list,
                                (task_prev != NULL) ?
                                &task_prev->t_execute_list :
                                &dev->execute_task_list);

                pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
                                " in execution queue\n",
                                task->task_se_cmd->t_task_cdb[0]);
                return 1;
        }
        /*
         * ORDERED, SIMPLE or UNTAGGED attribute tasks, once they have been
         * transitioned from Dormant -> Active state, are added to the end
         * of the struct se_device->execute_task_list
         */
        list_add_tail(&task->t_execute_list, &dev->execute_task_list);
        return 0;
}
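
/*
 * Worked example of the HEAD_OF_QUEUE ordering above (illustrative):
 * tasks T1..T3 of one MSG_HEAD_TAG command arriving at an execute list
 * [A, B] yield [T1, T2, T3, A, B] -- T1 is inserted at the head and each
 * subsequent task is chained after task_prev, so the command jumps the
 * queue while keeping its own tasks in order.
 */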
/* __transport_add_task_to_execute_queue():
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
        struct se_task *task,
        struct se_task *task_prev,
        struct se_device *dev)
{
        int head_of_queue;

        head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
        atomic_inc(&dev->execute_tasks);

        if (atomic_read(&task->task_state_active))
                return;
        /*
         * Determine if this task needs to go to HEAD_OF_QUEUE for the
         * state list as well.  Running without SAM Task Attribute emulation
         * will always return head_of_queue == 0 here
         */
        if (head_of_queue)
                list_add(&task->t_state_list, (task_prev) ?
                                &task_prev->t_state_list :
                                &dev->state_task_list);
        else
                list_add_tail(&task->t_state_list, &dev->state_task_list);

        atomic_set(&task->task_state_active, 1);

        pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
                task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
                task, dev);
}

static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct se_task *task;
        unsigned long flags;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        list_for_each_entry(task, &cmd->t_task_list, t_list) {
                if (atomic_read(&task->task_state_active))
                        continue;

                spin_lock(&dev->execute_task_lock);
                list_add_tail(&task->t_state_list, &dev->state_task_list);
                atomic_set(&task->task_state_active, 1);

                pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
                        task->task_se_cmd->se_tfo->get_task_tag(
                        task->task_se_cmd), task, dev);

                spin_unlock(&dev->execute_task_lock);
        }
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct se_task *task, *task_prev = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev->execute_task_lock, flags);
        list_for_each_entry(task, &cmd->t_task_list, t_list) {
                if (!list_empty(&task->t_execute_list))
                        continue;
                /*
                 * __transport_add_task_to_execute_queue() handles the
                 * SAM Task Attribute emulation if enabled
                 */
                __transport_add_task_to_execute_queue(task, task_prev, dev);
                task_prev = task;
        }
        spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

void __transport_remove_task_from_execute_queue(struct se_task *task,
                struct se_device *dev)
{
        list_del_init(&task->t_execute_list);
        atomic_dec(&dev->execute_tasks);
}

void transport_remove_task_from_execute_queue(
        struct se_task *task,
        struct se_device *dev)
{
        unsigned long flags;

        if (WARN_ON(list_empty(&task->t_execute_list)))
                return;

        spin_lock_irqsave(&dev->execute_task_lock, flags);
        __transport_remove_task_from_execute_queue(task, dev);
        spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void target_qf_do_work(struct work_struct *work)
{
        struct se_device *dev = container_of(work, struct se_device,
                        qf_work_queue);
        LIST_HEAD(qf_cmd_list);
        struct se_cmd *cmd, *cmd_tmp;

        spin_lock_irq(&dev->qf_cmd_lock);
        list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
        spin_unlock_irq(&dev->qf_cmd_lock);

        list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
                list_del(&cmd->se_qf_node);
                atomic_dec(&dev->dev_qf_count);
                smp_mb__after_atomic_dec();

                pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
                        " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
                        (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
                        (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
                        : "UNKNOWN");

                transport_add_cmd_to_queue(cmd, cmd->t_state, true);
        }
}
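
/*
 * Net effect: commands that previously failed a fabric callback with
 * -EAGAIN/-ENOMEM (see transport_handle_queue_full(), forward-declared
 * above) are re-queued at the head of the device queue in their saved
 * t_state (TRANSPORT_COMPLETE_QF_OK or TRANSPORT_COMPLETE_QF_WP), so the
 * processing thread retries the response or write-pending step.
 */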
unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
        switch (cmd->data_direction) {
        case DMA_NONE:
                return "NONE";
        case DMA_FROM_DEVICE:
                return "READ";
        case DMA_TO_DEVICE:
                return "WRITE";
        case DMA_BIDIRECTIONAL:
                return "BIDI";
        default:
                break;
        }

        return "UNKNOWN";
}

void transport_dump_dev_state(
        struct se_device *dev,
        char *b,
        int *bl)
{
        *bl += sprintf(b + *bl, "Status: ");
        switch (dev->dev_status) {
        case TRANSPORT_DEVICE_ACTIVATED:
                *bl += sprintf(b + *bl, "ACTIVATED");
                break;
        case TRANSPORT_DEVICE_DEACTIVATED:
                *bl += sprintf(b + *bl, "DEACTIVATED");
                break;
        case TRANSPORT_DEVICE_SHUTDOWN:
                *bl += sprintf(b + *bl, "SHUTDOWN");
                break;
        case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
        case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
                *bl += sprintf(b + *bl, "OFFLINE");
                break;
        default:
                *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
                break;
        }

        *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d",
                atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
                dev->queue_depth);
        *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
                dev->se_sub_dev->se_dev_attrib.block_size,
                dev->se_sub_dev->se_dev_attrib.max_sectors);
        *bl += sprintf(b + *bl, " ");
}

void transport_dump_vpd_proto_id(
        struct t10_vpd *vpd,
        unsigned char *p_buf,
        int p_buf_len)
{
        unsigned char buf[VPD_TMP_BUF_SIZE];
        int len;

        memset(buf, 0, VPD_TMP_BUF_SIZE);
        len = sprintf(buf, "T10 VPD Protocol Identifier: ");

        switch (vpd->protocol_identifier) {
        case 0x00:
                sprintf(buf+len, "Fibre Channel\n");
                break;
        case 0x10:
                sprintf(buf+len, "Parallel SCSI\n");
                break;
        case 0x20:
                sprintf(buf+len, "SSA\n");
                break;
        case 0x30:
                sprintf(buf+len, "IEEE 1394\n");
                break;
        case 0x40:
                sprintf(buf+len, "SCSI Remote Direct Memory Access"
                                " Protocol\n");
                break;
        case 0x50:
                sprintf(buf+len, "Internet SCSI (iSCSI)\n");
                break;
        case 0x60:
                sprintf(buf+len, "SAS Serial SCSI Protocol\n");
                break;
        case 0x70:
                sprintf(buf+len, "Automation/Drive Interface Transport"
                                " Protocol\n");
                break;
        case 0x80:
                sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
                break;
        default:
                sprintf(buf+len, "Unknown 0x%02x\n",
                                vpd->protocol_identifier);
                break;
        }

        if (p_buf)
                strncpy(p_buf, buf, p_buf_len);
        else
                pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
        /*
         * Check if the Protocol Identifier Valid (PIV) bit is set.
         *
         * from spc3r23.pdf section 7.5.1
         */
        if (page_83[1] & 0x80) {
                vpd->protocol_identifier = (page_83[0] & 0xf0);
                vpd->protocol_identifier_set = 1;
                transport_dump_vpd_proto_id(vpd, NULL, 0);
        }
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);
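
/*
 * Byte-layout example (values illustrative, per SPC-3): for a
 * designation descriptor starting 0x61 0x93 ..., page_83[0] = 0x61
 * carries protocol identifier 0x60 (SAS) in its upper nibble, and
 * page_83[1] = 0x93 has bit 7 (PIV) set, so the protocol identifier
 * above is taken as valid.
 */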
  943. int transport_dump_vpd_assoc(
  944. struct t10_vpd *vpd,
  945. unsigned char *p_buf,
  946. int p_buf_len)
  947. {
  948. unsigned char buf[VPD_TMP_BUF_SIZE];
  949. int ret = 0;
  950. int len;
  951. memset(buf, 0, VPD_TMP_BUF_SIZE);
  952. len = sprintf(buf, "T10 VPD Identifier Association: ");
  953. switch (vpd->association) {
  954. case 0x00:
  955. sprintf(buf+len, "addressed logical unit\n");
  956. break;
  957. case 0x10:
  958. sprintf(buf+len, "target port\n");
  959. break;
  960. case 0x20:
  961. sprintf(buf+len, "SCSI target device\n");
  962. break;
  963. default:
  964. sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
  965. ret = -EINVAL;
  966. break;
  967. }
  968. if (p_buf)
  969. strncpy(p_buf, buf, p_buf_len);
  970. else
  971. pr_debug("%s", buf);
  972. return ret;
  973. }
  974. int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
  975. {
  976. /*
  977. * The VPD identification association..
  978. *
  979. * from spc3r23.pdf Section 7.6.3.1 Table 297
  980. */
  981. vpd->association = (page_83[1] & 0x30);
  982. return transport_dump_vpd_assoc(vpd, NULL, 0);
  983. }
  984. EXPORT_SYMBOL(transport_set_vpd_assoc);
  985. int transport_dump_vpd_ident_type(
  986. struct t10_vpd *vpd,
  987. unsigned char *p_buf,
  988. int p_buf_len)
  989. {
  990. unsigned char buf[VPD_TMP_BUF_SIZE];
  991. int ret = 0;
  992. int len;
  993. memset(buf, 0, VPD_TMP_BUF_SIZE);
  994. len = sprintf(buf, "T10 VPD Identifier Type: ");
  995. switch (vpd->device_identifier_type) {
  996. case 0x00:
  997. sprintf(buf+len, "Vendor specific\n");
  998. break;
  999. case 0x01:
  1000. sprintf(buf+len, "T10 Vendor ID based\n");
  1001. break;
  1002. case 0x02:
  1003. sprintf(buf+len, "EUI-64 based\n");
  1004. break;
  1005. case 0x03:
  1006. sprintf(buf+len, "NAA\n");
  1007. break;
  1008. case 0x04:
  1009. sprintf(buf+len, "Relative target port identifier\n");
  1010. break;
  1011. case 0x08:
  1012. sprintf(buf+len, "SCSI name string\n");
  1013. break;
  1014. default:
  1015. sprintf(buf+len, "Unsupported: 0x%02x\n",
  1016. vpd->device_identifier_type);
  1017. ret = -EINVAL;
  1018. break;
  1019. }
  1020. if (p_buf) {
  1021. if (p_buf_len < strlen(buf)+1)
  1022. return -EINVAL;
  1023. strncpy(p_buf, buf, p_buf_len);
  1024. } else {
  1025. pr_debug("%s", buf);
  1026. }
  1027. return ret;
  1028. }
  1029. int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
  1030. {
  1031. /*
  1032. * The VPD identifier type..
  1033. *
  1034. * from spc3r23.pdf Section 7.6.3.1 Table 298
  1035. */
  1036. vpd->device_identifier_type = (page_83[1] & 0x0f);
  1037. return transport_dump_vpd_ident_type(vpd, NULL, 0);
  1038. }
  1039. EXPORT_SYMBOL(transport_set_vpd_ident_type);
int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x\n", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}
int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
					hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
					hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
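/*
 * Illustrative example for the parsing above (offsets per spc3r23
 * Table 290, values hypothetical): a single EVPD 0x83 designation
 * descriptor passed in as page_83[] might look like
 *
 *	page_83[0] = 0x01	PROTOCOL IDENTIFIER = 0, CODE SET = 1 (binary)
 *	page_83[1] = 0x03	ASSOCIATION = 00b (LUN), DESIGNATOR TYPE = 3 (NAA)
 *	page_83[3] = 0x08	DESIGNATOR LENGTH = 8 bytes
 *	page_83[4..11]		the 8-byte NAA identifier itself
 *
 * For the binary code set case, transport_set_vpd_ident() then stores
 * "3" followed by 16 hex digits into vpd->device_identifier.
 */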
static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", dev->transport->name,
		dev->transport->get_device_rev(dev));
}
static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug(" Vendor: ");
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			pr_debug("%c", wwn->vendor[i]);
		else
			pr_debug(" ");

	pr_debug(" Model: ");
	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			pr_debug("%c", wwn->model[i]);
		else
			pr_debug(" ");

	pr_debug(" Revision: ");
	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			pr_debug("%c", wwn->revision[i]);
		else
			pr_debug(" ");

	pr_debug("\n");

	device_type = dev->transport->get_device_type(dev);
	pr_debug(" Type:   %s ", scsi_device_type(device_type));
	pr_debug(" ANSI SCSI revision: %02x\n",
		dev->transport->get_device_rev(dev));
}
struct se_device *transport_add_device_to_core_hba(
	struct se_hba *hba,
	struct se_subsystem_api *transport,
	struct se_subsystem_dev *se_dev,
	u32 device_flags,
	void *transport_dev,
	struct se_dev_limits *dev_limits,
	const char *inquiry_prod,
	const char *inquiry_rev)
{
	int force_pt;
	struct se_device *dev;

	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
	if (!dev) {
		pr_err("Unable to allocate memory for se_dev_t\n");
		return NULL;
	}

	transport_init_queue_obj(&dev->dev_queue_obj);
	dev->dev_flags = device_flags;
	dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
	dev->dev_ptr = transport_dev;
	dev->se_hba = hba;
	dev->se_sub_dev = se_dev;
	dev->transport = transport;
	atomic_set(&dev->active_cmds, 0);
	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->execute_task_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->ordered_cmd_list);
	INIT_LIST_HEAD(&dev->state_task_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->ordered_cmd_lock);
	spin_lock_init(&dev->state_task_lock);
	spin_lock_init(&dev->dev_alua_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->dev_status_lock);
	spin_lock_init(&dev->dev_status_thr_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);

	dev->queue_depth = dev_limits->queue_depth;
	atomic_set(&dev->depth_left, dev->queue_depth);
	atomic_set(&dev->dev_ordered_id, 0);

	se_dev_set_default_attribs(dev, dev_limits);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();
	spin_lock_init(&dev->stats_lock);

	spin_lock(&hba->device_lock);
	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	/*
	 * Setup the SAM Task Attribute emulation for struct se_device
	 */
	core_setup_task_attr_emulation(dev);
	/*
	 * Force PR and ALUA passthrough emulation with internal object use.
	 */
	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
	/*
	 * Setup the Reservations infrastructure for struct se_device
	 */
	core_setup_reservations(dev, force_pt);
	/*
	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
	 */
	if (core_setup_alua(dev, force_pt) < 0)
		goto out;
	/*
	 * Startup the struct se_device processing thread
	 */
	dev->process_thread = kthread_run(transport_processing_thread, dev,
					  "LIO_%s", dev->transport->name);
	if (IS_ERR(dev->process_thread)) {
		pr_err("Unable to create kthread: LIO_%s\n",
			dev->transport->name);
		goto out;
	}
	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 * This is required so that transport_get_inquiry() copies these
	 * originals once back into DEV_T10_WWN(dev) for the virtual device
	 * setup.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (!inquiry_prod || !inquiry_rev) {
			pr_err("All non TCM/pSCSI plugins require"
				" INQUIRY consts\n");
			goto out;
		}

		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
	}
	scsi_dump_inquiry(dev);

	return dev;
out:
	/*
	 * The processing thread may not have been started yet (or may have
	 * failed to start), so only stop it when we actually hold a valid
	 * task_struct pointer.
	 */
	if (!IS_ERR_OR_NULL(dev->process_thread))
		kthread_stop(dev->process_thread);

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	se_release_vpd_for_dev(dev);

	kfree(dev);

	return NULL;
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);
/* transport_generic_prepare_cdb():
 *
 *	Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
 *	contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
 *	The point of this is since we are mapping iSCSI LUNs to
 *	SCSI Target IDs having a non-zero LUN in the CDB will throw the
 *	devices and HBAs for a loop.
 */
static inline void transport_generic_prepare_cdb(
	unsigned char *cdb)
{
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}
}
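/*
 * Example of the masking above (illustrative values): a legacy initiator
 * encoding LUN 2 in bits 7-5 of byte 1 of a TEST UNIT READY would send
 * cdb[1] = 0x40, and the default case clears it:
 *
 *	cdb[1] = 0x40 & 0x1f = 0x00
 *
 * The opcodes listed explicitly are skipped because the upper bits of
 * their byte 1 carry RDProtect/VRProtect/SELF-TEST fields, not a LUN.
 */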
static struct se_task *
transport_generic_get_task(struct se_cmd *cmd,
		enum dma_data_direction data_direction)
{
	struct se_task *task;
	struct se_device *dev = cmd->se_dev;

	task = dev->transport->alloc_task(cmd->t_task_cdb);
	if (!task) {
		pr_err("Unable to allocate struct se_task\n");
		return NULL;
	}

	INIT_LIST_HEAD(&task->t_list);
	INIT_LIST_HEAD(&task->t_execute_list);
	INIT_LIST_HEAD(&task->t_state_list);
	init_completion(&task->task_stop_comp);
	task->task_se_cmd = cmd;
	task->task_data_direction = data_direction;

	return task;
}
static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_lun_node);
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_ordered_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_queue_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->t_task_list);
	init_completion(&cmd->transport_lun_fe_stop_comp);
	init_completion(&cmd->transport_lun_stop_comp);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	spin_lock_init(&cmd->t_state_lock);
	atomic_set(&cmd->transport_dev_active, 1);

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;
}
EXPORT_SYMBOL(transport_init_se_cmd);
static int transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 0;

	if (cmd->sam_task_attr == MSG_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return -EINVAL;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
	smp_mb__after_atomic_inc();
	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			cmd->se_dev->transport->name);
	return 0;
}
/* transport_generic_allocate_tasks():
 *
 *	Called from the fabric module RX Thread.
 */
int transport_generic_allocate_tasks(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	int ret;

	transport_generic_prepare_cdb(cdb);
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		return -EINVAL;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now.  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			return -ENOMEM;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
	/*
	 * Setup the received CDB based on SCSI defined opcodes and
	 * perform unit attention, persistent reservations and ALUA
	 * checks for virtual device backends.  The cmd->t_task_cdb
	 * pointer is expected to be setup before we reach this point.
	 */
	ret = transport_generic_cmd_sequencer(cmd, cdb);
	if (ret < 0)
		return ret;
	/*
	 * Check for SAM Task Attribute Emulation
	 */
	if (transport_check_alloc_task_attr(cmd) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(transport_generic_allocate_tasks);
/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be called from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	int ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
	 * in existing usage to ensure that outstanding descriptors are handled
	 * correctly during shutdown via transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	atomic_set(&cmd->t_transport_active, 1);
	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary.
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret < 0) {
		cmd->transport_error_status = ret;
		transport_generic_request_failure(cmd, 0,
				(cmd->data_direction != DMA_TO_DEVICE));
	}
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
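/*
 * Sketch of the expected submission sequence from a fabric driver, assuming
 * a hypothetical fabric that embeds struct se_cmd inside its own per-I/O
 * descriptor (the ioreq/my_fabric_ops names below are illustrative only):
 *
 *	transport_init_se_cmd(&ioreq->se_cmd, &my_fabric_ops, se_sess,
 *			      data_len, DMA_FROM_DEVICE, MSG_SIMPLE_TAG,
 *			      ioreq->sense_buf);
 *	if (transport_lookup_cmd_lun(&ioreq->se_cmd, unpacked_lun) < 0)
 *		goto fail;
 *	if (transport_generic_allocate_tasks(&ioreq->se_cmd, cdb) < 0)
 *		goto fail;
 *	transport_handle_cdb_direct(&ioreq->se_cmd);
 *
 * i.e. initialize the descriptor, resolve the LUN, sequence the CDB, then
 * hand off to the engine from process context.
 */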
/*
 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
 * complete setup in TCM process context w/ TFO->new_cmd_map().
 */
int transport_generic_handle_cdb_map(
	struct se_cmd *cmd)
{
	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}

	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_cdb_map);
/* transport_generic_handle_data():
 *
 *
 */
int transport_generic_handle_data(
	struct se_cmd *cmd)
{
	/*
	 * For the software fabric case, we assume the nexus is being
	 * failed/shutdown when signals are pending from the kthread context
	 * caller, so we return a failure.  For the HW target mode case running
	 * in interrupt code, the signal_pending() check is skipped.
	 */
	if (!in_interrupt() && signal_pending(current))
		return -EPERM;
	/*
	 * If the received CDB has already been ABORTED by the generic
	 * target engine, we now call transport_check_aborted_status()
	 * to queue any delayed TASK_ABORTED status for the received CDB to the
	 * fabric module as we are expecting no further incoming DATA OUT
	 * sequences at this point.
	 */
	if (transport_check_aborted_status(cmd, 1) != 0)
		return 0;

	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_data);
/* transport_generic_handle_tmr():
 *
 *
 */
int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
/*
 * If the task is active, request it to be stopped and sleep until it
 * has completed.
 */
bool target_stop_task(struct se_task *task, unsigned long *flags)
{
	struct se_cmd *cmd = task->task_se_cmd;
	bool was_active = false;

	if (task->task_flags & TF_ACTIVE) {
		task->task_flags |= TF_REQUEST_STOP;
		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

		pr_debug("Task %p waiting to complete\n", task);
		wait_for_completion(&task->task_stop_comp);
		pr_debug("Task %p stopped successfully\n", task);

		spin_lock_irqsave(&cmd->t_state_lock, *flags);
		atomic_dec(&cmd->t_task_cdbs_left);
		task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
		was_active = true;
	}

	return was_active;
}
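/*
 * Note on the locking contract above: the caller holds cmd->t_state_lock,
 * and the *flags pointer lets target_stop_task() drop that lock around the
 * blocking wait_for_completion() (sleeping while holding a spinlock would
 * be a bug) and then re-acquire it, handing the saved IRQ state back to
 * the caller.
 */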
static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;
	int ret = 0;

	pr_debug("ITT[0x%08x] - Stopping tasks\n",
		cmd->se_tfo->get_task_tag(cmd));

	/*
	 * No tasks remain in the execution queue
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		pr_debug("Processing task %p\n", task);
		/*
		 * If the struct se_task has not been sent and is not active,
		 * remove the struct se_task from the execution queue.
		 */
		if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
			spin_unlock_irqrestore(&cmd->t_state_lock,
					flags);
			transport_remove_task_from_execute_queue(task,
					cmd->se_dev);

			pr_debug("Task %p removed from execute queue\n", task);
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			continue;
		}

		if (!target_stop_task(task, &flags)) {
			pr_debug("Task %p - did nothing\n", task);
			ret++;
		}
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return ret;
}
/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
static void transport_generic_request_failure(
	struct se_cmd *cmd,
	int complete,
	int sc)
{
	int ret = 0;

	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->t_task_cdb[0]);
	pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
		cmd->se_tfo->get_cmd_state(cmd),
		cmd->t_state,
		cmd->transport_error_status);
	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
		" t_transport_active: %d t_transport_stop: %d"
		" t_transport_sent: %d\n", cmd->t_task_list_num,
		atomic_read(&cmd->t_task_cdbs_left),
		atomic_read(&cmd->t_task_cdbs_sent),
		atomic_read(&cmd->t_task_cdbs_ex_left),
		atomic_read(&cmd->t_transport_active),
		atomic_read(&cmd->t_transport_stop),
		atomic_read(&cmd->t_transport_sent));
	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	if (complete) {
		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
	}

	switch (cmd->transport_error_status) {
	case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
		break;
	case PYX_TRANSPORT_INVALID_CDB_FIELD:
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		break;
	case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
		break;
	case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
		if (!sc)
			transport_new_cmd_failure(cmd);
		/*
		 * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
		 * we force this session to fall back to session
		 * recovery.
		 */
		cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
		cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);

		goto check_stop;
	case PYX_TRANSPORT_LU_COMM_FAILURE:
	case PYX_TRANSPORT_ILLEGAL_REQUEST:
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		break;
	case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
		cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
		break;
	case PYX_TRANSPORT_WRITE_PROTECTED:
		cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
		break;
	case PYX_TRANSPORT_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
				cmd->orig_fe_lun, 0x2C,
				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);

		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		goto check_stop;
	case PYX_TRANSPORT_USE_SENSE_REASON:
		/*
		 * struct se_cmd->scsi_sense_reason already set
		 */
		break;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0],
			cmd->transport_error_status);
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}
	/*
	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
	 * make the call to transport_send_check_condition_and_sense()
	 * directly.  Otherwise expect the fabric to make the call to
	 * transport_send_check_condition_and_sense() after handling
	 * possible unsolicited write data payloads.
	 */
	if (!sc && !cmd->se_tfo->new_cmd_map)
		transport_new_cmd_failure(cmd);
	else {
		ret = transport_send_check_condition_and_sense(cmd,
				cmd->scsi_sense_reason, 0);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
	}

check_stop:
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
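/*
 * Worked example for the helpers above (illustrative values): a READ_16
 * CDB carrying LBA bytes cdb[2..9] = 00 00 00 01 00 00 00 10 decodes as
 *
 *	__v1 = 0x00000001	(most significant 32 bits)
 *	__v2 = 0x00000010	(least significant 32 bits)
 *	lba  = (__v1 << 32) | __v2 = 0x0000000100000010
 *
 * i.e. SCSI CDBs are big-endian, so the earlier bytes land in the upper
 * half of the 64-bit LBA.
 */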
static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}

static inline int transport_tcq_window_closed(struct se_device *dev)
{
	if (dev->dev_tcq_window_closed++ <
	    PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
	} else
		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);

	wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
	return 0;
}
/*
 * Called from Fabric Module context from transport_execute_tasks()
 *
 * The return of this function determines if the tasks from struct se_cmd
 * get added to the execution queue in transport_execute_tasks(),
 * or are added to the delayed or ordered lists here.
 */
static inline int transport_execute_task_attr(struct se_cmd *cmd)
{
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 1;
	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
	 * so the passed struct se_cmd's tasks jump to the front of the
	 * execution queue.
	 */
	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		atomic_inc(&cmd->se_dev->dev_hoq_count);
		smp_mb__after_atomic_inc();
		pr_debug("Added HEAD_OF_QUEUE for CDB:"
			" 0x%02x, se_ordered_id: %u\n",
			cmd->t_task_cdb[0],
			cmd->se_ordered_id);
		return 1;
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		spin_lock(&cmd->se_dev->ordered_cmd_lock);
		list_add_tail(&cmd->se_ordered_node,
				&cmd->se_dev->ordered_cmd_list);
		spin_unlock(&cmd->se_dev->ordered_cmd_lock);

		atomic_inc(&cmd->se_dev->dev_ordered_sync);
		smp_mb__after_atomic_inc();

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
			" list, se_ordered_id: %u\n",
			cmd->t_task_cdb[0],
			cmd->se_ordered_id);
		/*
		 * Add ORDERED command to tail of execution queue if
		 * no other older commands exist that need to be
		 * completed first.
		 */
		if (!atomic_read(&cmd->se_dev->simple_cmds))
			return 1;
	} else {
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc(&cmd->se_dev->simple_cmds);
		smp_mb__after_atomic_inc();
	}
	/*
	 * If one or more outstanding ORDERED task attribute commands exist,
	 * the dormant task(s) built for the passed struct se_cmd must be
	 * held back until this struct se_device can go Active again.
	 */
	if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
		/*
		 * Add cmd w/ tasks to the delayed cmd queue, which
		 * will be drained upon completion of HEAD_OF_QUEUE task.
		 */
		spin_lock(&cmd->se_dev->delayed_cmd_lock);
		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
		list_add_tail(&cmd->se_delayed_node,
				&cmd->se_dev->delayed_cmd_list);
		spin_unlock(&cmd->se_dev->delayed_cmd_lock);

		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
			" delayed CMD list, se_ordered_id: %u\n",
			cmd->t_task_cdb[0], cmd->sam_task_attr,
			cmd->se_ordered_id);
		/*
		 * Return zero to let transport_execute_tasks() know
		 * not to add the delayed tasks to the execution list.
		 */
		return 0;
	}
	/*
	 * Otherwise, no ORDERED task attributes exist.
	 */
	return 1;
}
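/*
 * Summary of the return convention above: 1 means the caller should add
 * this command's tasks to the execution queue now (no emulation enabled,
 * HEAD_OF_QUEUE, or nothing ordered is pending); 0 means the command was
 * parked on dev->delayed_cmd_list and will be picked up later once the
 * outstanding ORDERED commands have completed.
 */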
/*
 * Called from fabric module context in transport_generic_new_cmd() and
 * transport_generic_process_write()
 */
static int transport_execute_tasks(struct se_cmd *cmd)
{
	int add_tasks;

	if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
		transport_generic_request_failure(cmd, 0, 1);
		return 0;
	}
	/*
	 * Call transport_cmd_check_stop() to see if a fabric exception
	 * has occurred that prevents execution.
	 */
	if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
		/*
		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
		 * attribute for the tasks of the received struct se_cmd CDB
		 */
		add_tasks = transport_execute_task_attr(cmd);
		if (!add_tasks)
			goto execute_tasks;
		/*
		 * This calls transport_add_tasks_from_cmd() to handle
		 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
		 * (if enabled) in __transport_add_task_to_execute_queue() and
		 * transport_add_task_check_sam_attr().
		 */
		transport_add_tasks_from_cmd(cmd);
	}
	/*
	 * Kick the execution queue for the cmd associated struct se_device
	 * storage object.
	 */
execute_tasks:
	__transport_execute_tasks(cmd->se_dev);
	return 0;
}
/*
 * Called to check the struct se_device tcq depth window, and once open
 * pull a struct se_task from struct se_device->execute_task_list and
 * dispatch it to the backend transport.
 *
 * Called from transport_processing_thread()
 */
static int __transport_execute_tasks(struct se_device *dev)
{
	int error;
	struct se_cmd *cmd = NULL;
	struct se_task *task = NULL;
	unsigned long flags;

	/*
	 * Check if there is enough room in the device and HBA queue to send
	 * struct se_tasks to the selected transport.
	 */
check_depth:
	if (!atomic_read(&dev->depth_left))
		return transport_tcq_window_closed(dev);

	dev->dev_tcq_window_closed = 0;

	spin_lock_irq(&dev->execute_task_lock);
	if (list_empty(&dev->execute_task_list)) {
		spin_unlock_irq(&dev->execute_task_lock);
		return 0;
	}
	task = list_first_entry(&dev->execute_task_list,
				struct se_task, t_execute_list);
	__transport_remove_task_from_execute_queue(task, dev);
	spin_unlock_irq(&dev->execute_task_lock);

	atomic_dec(&dev->depth_left);

	cmd = task->task_se_cmd;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	task->task_flags |= (TF_ACTIVE | TF_SENT);
	atomic_inc(&cmd->t_task_cdbs_sent);

	if (atomic_read(&cmd->t_task_cdbs_sent) ==
	    cmd->t_task_list_num)
		atomic_set(&cmd->t_transport_sent, 1);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (cmd->execute_task)
		error = cmd->execute_task(task);
	else
		error = dev->transport->do_task(task);
	if (error != 0) {
		cmd->transport_error_status = error;
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		task->task_flags &= ~TF_ACTIVE;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		atomic_set(&cmd->t_transport_sent, 0);
		transport_stop_tasks_for_cmd(cmd);
		atomic_inc(&dev->depth_left);
		transport_generic_request_failure(cmd, 0, 1);
	}

	goto check_depth;

	return 0;
}
void transport_new_cmd_failure(struct se_cmd *se_cmd)
{
	unsigned long flags;

	/*
	 * Any unsolicited data will get dumped for failed command inside of
	 * the fabric plugin
	 */
	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
	se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
	se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
static inline u32 transport_get_sectors_6(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 8-bit sector value.
	 */
	if (!dev)
		goto type_disk;
	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
	/*
	 * Everything else assumes the TYPE_DISK Sector CDB location.
	 * Use 8-bit sector value.
	 */
type_disk:
	return (u32)cdb[4];
}

static inline u32 transport_get_sectors_10(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 16-bit sector value.
	 */
	if (!dev)
		goto type_disk;
	/*
	 * XXX_10 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}
	/*
	 * Everything else assumes the TYPE_DISK Sector CDB location.
	 * Use 16-bit sector value.
	 */
type_disk:
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;
	/*
	 * XXX_12 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}
	/*
	 * Everything else assumes the TYPE_DISK Sector CDB location.
	 * Use 32-bit sector value.
	 */
type_disk:
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;
	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];

type_disk:
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}
/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];
}
static inline u32 transport_get_size(
	u32 sectors,
	unsigned char *cdb,
	struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		if (cdb[1] & 1) { /* sectors */
			return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
		} else /* bytes */
			return sectors;
	}
#if 0
	pr_debug("Returning block_size: %u, sectors: %u == %u for"
		" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
		dev->se_sub_dev->se_dev_attrib.block_size * sectors,
		dev->transport->name);
#endif
	return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}
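/*
 * Worked example (illustrative numbers): a READ_10 for 8 sectors against
 * a disk backend with a 512-byte block_size yields
 *
 *	size = 512 * 8 = 4096 bytes
 *
 * For TYPE_TAPE the FIXED bit (cdb[1] & 1) decides whether the CDB length
 * field counts fixed-size blocks (multiply by block_size) or raw bytes
 * (use the value as-is).
 */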
static void transport_xor_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	int i;
	int count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);
	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */
	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg), KM_USER0);
		if (!addr)
			goto out;

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr, KM_USER0);
	}

out:
	kfree(buf);
}
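/*
 * XOR semantics refresher for the loop above (example bytes): reading
 * 0xA5 from the backing store and receiving 0x0F in the data-out buffer
 * produces 0xA5 ^ 0x0F = 0xAA in the data-in buffer.  kmap_atomic() is
 * used because the BIDI scatterlist pages may live in highmem and the
 * XOR runs in a non-sleeping context.
 */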
/*
 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
 */
static int transport_get_sense_data(struct se_cmd *cmd)
{
	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
	struct se_device *dev = cmd->se_dev;
	struct se_task *task = NULL, *task_tmp;
	unsigned long flags;
	u32 offset = 0;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}

	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		if (!task->task_sense)
			continue;

		if (!dev->transport->get_sense_buffer) {
			pr_err("dev->transport->get_sense_buffer"
					" is NULL\n");
			continue;
		}

		sense_buffer = dev->transport->get_sense_buffer(task);
		if (!sense_buffer) {
			pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
				" sense buffer for task with sense\n",
				cmd->se_tfo->get_task_tag(cmd), task);
			continue;
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);

		memcpy(&buffer[offset], sense_buffer,
				TRANSPORT_SENSE_BUFFER);
		cmd->scsi_status = task->task_scsi_status;
		/* Automatically padded */
		cmd->scsi_sense_length =
				(TRANSPORT_SENSE_BUFFER + offset);

		pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
				" and sense\n",
			dev->se_hba->hba_id, dev->transport->name,
				cmd->scsi_status);
		return 0;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return -1;
}
static int
transport_handle_reservation_conflict(struct se_cmd *cmd)
{
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
	cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
	/*
	 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
	 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
	 * CONFLICT STATUS.
	 *
	 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
	 */
	if (cmd->se_sess &&
	    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
		core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
			cmd->orig_fe_lun, 0x2C,
			ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
	return -EINVAL;
}
static inline long long transport_dev_end_lba(struct se_device *dev)
{
	return dev->transport->get_blocks(dev) + 1;
}

static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors;

	if (dev->transport->get_device_type(dev) != TYPE_DISK)
		return 0;

	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);

	if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
		pr_err("LBA: %llu Sectors: %u exceeds"
			" transport_dev_end_lba(): %llu\n",
			cmd->t_task_lba, sectors,
			transport_dev_end_lba(dev));
		return -EINVAL;
	}

	return 0;
}
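/*
 * Range-check example for the helper above (illustrative numbers): with
 * get_blocks() returning the last addressable LBA, a 2048-block device
 * reports get_blocks() == 2047, so transport_dev_end_lba() == 2048.
 * A request with t_task_lba = 2040 and sectors = 8 touches LBAs
 * 2040..2047 and passes (2040 + 8 == 2048 is not greater), while
 * sectors = 9 would run past the device and fail with -EINVAL.
 */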
static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
{
	/*
	 * Determine if the received WRITE_SAME is used for direct
	 * passthrough into Linux/SCSI with struct request via TCM/pSCSI,
	 * or if we are signaling the use of internal WRITE_SAME + UNMAP=1
	 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code.
	 */
	int passthrough = (dev->transport->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);

	if (!passthrough) {
		if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
			pr_err("WRITE_SAME PBDATA and LBDATA"
				" bits not supported for Block Discard"
				" Emulation\n");
			return -ENOSYS;
		}
		/*
		 * Currently for the emulated case we only accept
		 * WRITE_SAME with the UNMAP=1 bit set.
		 */
		if (!(flags[0] & 0x08)) {
			pr_err("WRITE_SAME w/o UNMAP bit not"
				" supported for Block Discard Emulation\n");
			return -ENOSYS;
		}
	}

	return 0;
}
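/*
 * Bit layout of the flags byte checked above (WRITE_SAME byte 1 per
 * sbc3): 0x08 = UNMAP, 0x04 = PBDATA, 0x02 = LBDATA.  So for the
 * emulated discard path a typical WRITE_SAME(16) arrives with
 * flags[0] == 0x08 (UNMAP only), and anything carrying PBDATA/LBDATA
 * or lacking UNMAP is rejected with -ENOSYS.
 */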
/* transport_generic_cmd_sequencer():
 *
 *	Generic Command Sequencer that should work for most DAS transport
 *	drivers.
 *
 *	Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
 *	RX Thread.
 *
 *	FIXME: Need to support other SCSI OPCODES here as well.
 */
static int transport_generic_cmd_sequencer(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	int ret = 0, sector_ret = 0, passthrough;
	u32 sectors = 0, size = 0, pr_reg_type = 0;
	u16 service_action;
	u8 alua_ascq = 0;
	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	if (core_scsi3_ua_check(cmd, cdb) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
		return -EINVAL;
	}
	/*
	 * Check status of Asymmetric Logical Unit Assignment port
	 */
	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
	if (ret != 0) {
		/*
		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
		 * The ALUA additional sense code qualifier (ASCQ) is determined
		 * by the ALUA primary or secondary access state.
		 */
		if (ret > 0) {
#if 0
			pr_debug("[%s]: ALUA TG Port not available,"
				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
				cmd->se_tfo->get_fabric_name(), alua_ascq);
#endif
			transport_set_sense_codes(cmd, 0x04, alua_ascq);
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
			return -EINVAL;
		}
		goto out_invalid_cdb_field;
	}
	/*
	 * Check status for SPC-3 Persistent Reservations
	 */
	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
					cmd, cdb, pr_reg_type) != 0)
			return transport_handle_reservation_conflict(cmd);
		/*
		 * This means the CDB is allowed for the SCSI Initiator port
		 * when said port is *NOT* holding the legacy SPC-2 or
		 * SPC-3 Persistent Reservation.
		 */
	}
	/*
	 * If we operate in passthrough mode we skip most CDB emulation and
	 * instead hand the commands down to the physical SCSI device.
	 */
	passthrough =
		(dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->t_tasks_fua = (cdb[1] & 0x8);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->t_tasks_fua = (cdb[1] & 0x8);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->t_tasks_fua = (cdb[1] & 0x8);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case XDWRITEREAD_10:
		if ((cmd->data_direction != DMA_TO_DEVICE) ||
		    !(cmd->t_tasks_bidi))
			goto out_invalid_cdb_field;
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		/*
		 * Do not allow BIDI commands for passthrough mode.
		 */
		if (passthrough)
			goto out_unsupported_cdb;
		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->transport_complete_callback = &transport_xor_callback;
		cmd->t_tasks_fua = (cdb[1] & 0x8);
		break;
	case VARIABLE_LENGTH_CMD:
		service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;
			size = transport_get_size(sectors, cdb, cmd);
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
			/*
			 * Do not allow BIDI commands for passthrough mode.
			 */
			if (passthrough)
				goto out_unsupported_cdb;
			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->transport_complete_callback = &transport_xor_callback;
			cmd->t_tasks_fua = (cdb[10] & 0x8);
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;

			if (sectors)
				size = transport_get_size(1, cdb, cmd);
			else {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				goto out_invalid_cdb_field;
			}

			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

			if (target_check_write_same_discard(&cdb[10], dev) < 0)
				goto out_invalid_cdb_field;
			if (!passthrough)
				cmd->execute_task = target_emulate_write_same;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			goto out_unsupported_cdb;
		}
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_IN from SCC-2 */
			/*
			 * Check for emulated MI_REPORT_TARGET_PGS.
			 */
			if (cdb[1] == MI_REPORT_TARGET_PGS &&
			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
				cmd->execute_task =
					target_emulate_report_target_port_groups;
			}
			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else {
			/* GPCMD_SEND_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SELECT:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SELECT_10:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SENSE:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_modesense;
		break;
	case MODE_SENSE_10:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_modesense;
		break;
	case GPCMD_READ_BUFFER_CAPACITY:
	case GPCMD_SEND_OPC:
	case LOG_SELECT:
	case LOG_SENSE:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_BLOCK_LIMITS:
		size = READ_BLOCK_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case GPCMD_GET_CONFIGURATION:
	case GPCMD_READ_FORMAT_CAPACITIES:
	case GPCMD_READ_DISC_INFO:
	case GPCMD_READ_TRACK_RZONE_INFO:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case PERSISTENT_RESERVE_IN:
		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
			cmd->execute_task = target_scsi3_emulate_pr_in;
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case PERSISTENT_RESERVE_OUT:
		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
			cmd->execute_task = target_scsi3_emulate_pr_out;
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case GPCMD_MECHANISM_STATUS:
	case GPCMD_READ_DVD_STRUCTURE:
		size = (cdb[8] << 8) + cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_POSITION:
		size = READ_POSITION_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_OUT from SCC-2
			 *
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS &&
			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
				cmd->execute_task =
					target_emulate_set_target_port_groups;
			}
			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else {
			/* GPCMD_REPORT_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
  2560. case INQUIRY:
  2561. size = (cdb[3] << 8) + cdb[4];
  2562. /*
  2563. * Do implict HEAD_OF_QUEUE processing for INQUIRY.
  2564. * See spc4r17 section 5.3
  2565. */
  2566. if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
  2567. cmd->sam_task_attr = MSG_HEAD_TAG;
  2568. cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
  2569. if (!passthrough)
  2570. cmd->execute_task = target_emulate_inquiry;
  2571. break;
	case READ_BUFFER:
		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_readcapacity;
		break;
	case READ_MEDIA_SERIAL_NUMBER:
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			if (!passthrough)
				cmd->execute_task =
					target_emulate_readcapacity_16;
			break;
		default:
			if (passthrough)
				break;

			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			goto out_unsupported_cdb;
		}
		/*FALLTHROUGH*/
	case ACCESS_CONTROL_IN:
	case ACCESS_CONTROL_OUT:
	case EXTENDED_COPY:
	case READ_ATTRIBUTE:
	case RECEIVE_COPY_RESULTS:
	case WRITE_ATTRIBUTE:
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		size = (cdb[3] << 8) | cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
#if 0
	case GPCMD_READ_CD:
		sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		size = (2336 * sectors);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
#endif
	case READ_TOC:
		size = cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case REQUEST_SENSE:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_request_sense;
		break;
	case READ_ELEMENT_STATUS:
		size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case WRITE_BUFFER:
		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			size = (cdb[7] << 8) | cdb[8];
		else
			size = cmd->data_length;
		/*
		 * Setup the legacy emulated handler for SPC-2 and
		 * >= SPC-3 compatible reservation handling (CRH=1)
		 * Otherwise, we assume the underlying SCSI logic is
		 * running in SPC_PASSTHROUGH, and wants reservations
		 * emulation disabled.
		 */
		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
			cmd->execute_task = target_scsi2_reservation_reserve;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case RELEASE:
	case RELEASE_10:
		/*
		 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RELEASE_10)
			size = (cdb[7] << 8) | cdb[8];
		else
			size = cmd->data_length;

		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
			cmd->execute_task = target_scsi2_reservation_release;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case SYNCHRONIZE_CACHE:
	case 0x91: /* SYNCHRONIZE_CACHE_16: */
		/*
		 * Extract LBA and range to be flushed for emulated
		 * SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (sector_ret)
			goto out_unsupported_cdb;

		size = transport_get_size(sectors, cdb, cmd);
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;

		if (passthrough)
			break;
		/*
		 * Check to ensure that LBA + Range does not exceed the end of
		 * the device for IBLOCK and FILEIO ->do_sync_cache() backend calls
		 */
		if ((cmd->t_task_lba != 0) || (sectors != 0)) {
			if (transport_cmd_get_valid_sectors(cmd) < 0)
				goto out_invalid_cdb_field;
		}
		cmd->execute_task = target_emulate_synchronize_cache;
		break;
	case UNMAP:
		size = get_unaligned_be16(&cdb[7]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;

		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_invalid_cdb_field;
		if (!passthrough)
			cmd->execute_task = target_emulate_write_same;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;

		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		/*
		 * Follow sbc3r26 with WRITE_SAME (10) and check for the
		 * existence of byte 1 bit 3 UNMAP instead of the original
		 * reserved field
		 */
		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_invalid_cdb_field;
		if (!passthrough)
			cmd->execute_task = target_emulate_write_same;
		break;
	case ALLOW_MEDIUM_REMOVAL:
	case ERASE:
	case REZERO_UNIT:
	case SEEK_10:
	case SPACE:
	case START_STOP:
	case TEST_UNIT_READY:
	case VERIFY:
	case WRITE_FILEMARKS:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_noop;
		break;
	case GPCMD_CLOSE_TRACK:
	case INITIALIZE_ELEMENT_STATUS:
	case GPCMD_LOAD_UNLOAD:
	case GPCMD_SET_SPEED:
	case MOVE_MEDIUM:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case REPORT_LUNS:
		cmd->execute_task = target_report_luns;
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	default:
		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
			" 0x%02x, sending CHECK_CONDITION.\n",
			cmd->se_tfo->get_fabric_name(), cdb[0]);
		goto out_unsupported_cdb;
	}

	if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
				cmd->data_length, size, cdb[0]);

		cmd->cmd_spdtl = size;

		if (cmd->data_direction == DMA_TO_DEVICE) {
			pr_err("Rejecting underflow/overflow"
				" WRITE data\n");
			goto out_invalid_cdb_field;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_SG_IO_CDB.
		 */
		if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			goto out_invalid_cdb_field;
		}

		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
		}
		cmd->data_length = size;
	}

	/* reject any command that we don't have a handler for */
	if (!(passthrough || cmd->execute_task ||
	     (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
		goto out_unsupported_cdb;

	/* Let's limit control cdbs to a page, for simplicity's sake. */
	if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
	    size > PAGE_SIZE)
		goto out_invalid_cdb_field;

	transport_set_supported_SAM_opcode(cmd);
	return ret;

out_unsupported_cdb:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
	return -EINVAL;
out_invalid_cdb_field:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
	return -EINVAL;
}

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_cmd *cmd_p, *cmd_tmp;
	int new_active_tasks = 0;

	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
		atomic_dec(&dev->simple_cmds);
		smp_mb__after_atomic_dec();
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		atomic_dec(&dev->dev_hoq_count);
		smp_mb__after_atomic_dec();
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for"
			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		spin_lock(&dev->ordered_cmd_lock);
		list_del(&cmd->se_ordered_node);
		atomic_dec(&dev->dev_ordered_sync);
		smp_mb__after_atomic_dec();
		spin_unlock(&dev->ordered_cmd_lock);

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
	}
	/*
	 * Process all commands up to the last received
	 * ORDERED task attribute which requires another blocking
	 * boundary
	 */
	spin_lock(&dev->delayed_cmd_lock);
	list_for_each_entry_safe(cmd_p, cmd_tmp,
			&dev->delayed_cmd_list, se_delayed_node) {
		list_del(&cmd_p->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		pr_debug("Calling add_tasks() for"
			" cmd_p: 0x%02x Task Attr: 0x%02x"
			" Dormant -> Active, se_ordered_id: %u\n",
			cmd_p->t_task_cdb[0],
			cmd_p->sam_task_attr, cmd_p->se_ordered_id);

		transport_add_tasks_from_cmd(cmd_p);
		new_active_tasks++;

		spin_lock(&dev->delayed_cmd_lock);
		if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
			break;
	}
	spin_unlock(&dev->delayed_cmd_lock);
	/*
	 * If new tasks have become active, wake up the transport thread
	 * to do the processing of the Active tasks.
	 */
	if (new_active_tasks != 0)
		wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
}
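
/*
 * Retry the frontend completion callbacks for a command that previously
 * failed with QUEUE_FULL: requeue any pending sense data, then the data-in
 * payload and/or status appropriate to the command's data direction.
 */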
static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret)
			goto out;
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->t_bidi_data_sg) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret < 0)
				break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

out:
	if (ret < 0) {
		transport_handle_queue_full(cmd, cmd->se_dev);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}
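
/*
 * Park a command on the device's queue-full list and kick qf_work_queue,
 * whose worker will retry the frontend queueing for each parked command.
 */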
static void transport_handle_queue_full(
	struct se_cmd *cmd,
	struct se_device *dev)
{
	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
	atomic_inc(&dev->dev_qf_count);
	smp_mb__after_atomic_inc();
	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

	schedule_work(&cmd->se_dev->qf_work_queue);
}
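
/*
 * Workqueue handler for a successfully completed command: perform task
 * attribute bookkeeping, push out sense data if required, and queue the
 * data-in payload and/or SCSI status back to the fabric.
 */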
static void target_complete_ok_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	int reason = 0, ret;

	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);
	/*
	 * Check to schedule QUEUE_FULL work, or execute an existing
	 * cmd->transport_qf_callback()
	 */
	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	/*
	 * Check if we need to retrieve a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		if (transport_get_sense_data(cmd) < 0)
			reason = TCM_NON_EXISTENT_LUN;

		/*
		 * Only set when a struct se_task->task_scsi_status returned
		 * a non-GOOD status.
		 */
		if (cmd->scsi_status) {
			ret = transport_send_check_condition_and_sense(
					cmd, reason, 1);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}
	/*
	 * Check for a callback, used amongst other things
	 * by XDWRITE_READ_10 emulation.
	 */
	if (cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd);

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);

		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
				cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (cmd->t_bidi_data_sg) {
			spin_lock(&cmd->se_lun->lun_sep_lock);
			if (cmd->se_lun->lun_sep) {
				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
			}
			spin_unlock(&cmd->se_lun->lun_sep_lock);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		" data_direction: %d\n", cmd, cmd->data_direction);
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
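
/*
 * Move all inactive tasks off cmd->t_task_list under the state lock, then
 * free their scatterlists (unless shared with the command itself) and
 * release the tasks back to the backend outside the lock.
 */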
static void transport_free_dev_tasks(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;
	LIST_HEAD(dispose_list);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		if (!(task->task_flags & TF_ACTIVE))
			list_move_tail(&task->t_list, &dispose_list);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	while (!list_empty(&dispose_list)) {
		task = list_first_entry(&dispose_list, struct se_task, t_list);

		if (task->task_sg != cmd->t_data_sg &&
		    task->task_sg != cmd->t_bidi_data_sg)
			kfree(task->task_sg);

		list_del(&task->t_list);
		cmd->se_dev->transport->free_task(task);
	}
}
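
/*
 * Release the backing pages of a scatterlist and then the SGL itself.
 * Used below for both the normal and BIDI data SGLs of a command.
 */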
static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
		return;

	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}

/**
 * transport_put_cmd - release a reference to a command
 * @cmd: command to release
 *
 * This routine releases our reference to the command and frees it if possible.
 */
static void transport_put_cmd(struct se_cmd *cmd)
{
	unsigned long flags;
	int free_tasks = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (atomic_read(&cmd->t_fe_count)) {
		if (!atomic_dec_and_test(&cmd->t_fe_count))
			goto out_busy;
	}

	if (atomic_read(&cmd->t_se_count)) {
		if (!atomic_dec_and_test(&cmd->t_se_count))
			goto out_busy;
	}

	if (atomic_read(&cmd->transport_dev_active)) {
		atomic_set(&cmd->transport_dev_active, 0);
		transport_all_task_dev_remove_state(cmd);
		free_tasks = 1;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (free_tasks != 0)
		transport_free_dev_tasks(cmd);

	transport_free_pages(cmd);
	transport_release_cmd(cmd);
	return;
out_busy:
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

/*
 * transport_generic_map_mem_to_cmd - Use fabric-allocated pages instead of
 * allocating in the core.
 * @cmd: Associated se_cmd descriptor
 * @sgl: SGL style memory for TCM WRITE / READ
 * @sgl_count: Number of SGL elements
 * @sgl_bidi: SGL style memory for TCM BIDI READ
 * @sgl_bidi_count: Number of BIDI READ SGL elements
 *
 * Return: nonzero if the cmd was rejected due to -ENOMEM or improper
 * usage of parameters.
 */
int transport_generic_map_mem_to_cmd(
	struct se_cmd *cmd,
	struct scatterlist *sgl,
	u32 sgl_count,
	struct scatterlist *sgl_bidi,
	u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {

		cmd->t_data_sg = sgl;
		cmd->t_data_nents = sgl_count;

		if (sgl_bidi && sgl_bidi_count) {
			cmd->t_bidi_data_sg = sgl_bidi;
			cmd->t_bidi_data_nents = sgl_bidi_count;
		}
		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	}

	return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);

void *transport_kmap_first_data_page(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;

	BUG_ON(!sg);
	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
	 */
	return kmap(sg_page(sg)) + sg->offset;
}
EXPORT_SYMBOL(transport_kmap_first_data_page);

void transport_kunmap_first_data_page(struct se_cmd *cmd)
{
	kunmap(sg_page(cmd->t_data_sg));
}
EXPORT_SYMBOL(transport_kunmap_first_data_page);
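
/*
 * Allocate zeroed pages one at a time to back cmd->data_length, building
 * the command's data scatterlist over them. On allocation failure, every
 * page populated so far is released again.
 */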
static int
transport_generic_get_mem(struct se_cmd *cmd)
{
	u32 length = cmd->data_length;
	unsigned int nents;
	struct page *page;
	int i = 0;

	nents = DIV_ROUND_UP(length, PAGE_SIZE);
	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
	if (!cmd->t_data_sg)
		return -ENOMEM;

	cmd->t_data_nents = nents;
	sg_init_table(cmd->t_data_sg, nents);

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto out;

		sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	return 0;

out:
	/*
	 * Only unwind entries that were actually populated; entry i was
	 * never set when alloc_page() failed above.
	 */
	while (i > 0) {
		i--;
		__free_page(sg_page(&cmd->t_data_sg[i]));
	}
	kfree(cmd->t_data_sg);
	cmd->t_data_sg = NULL;
	return -ENOMEM;
}

/* Reduce sectors if they are too long for the device */
static inline sector_t transport_limit_task_sectors(
	struct se_device *dev,
	unsigned long long lba,
	sector_t sectors)
{
	sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);

	if (dev->transport->get_device_type(dev) == TYPE_DISK)
		if ((lba + sectors) > transport_dev_end_lba(dev))
			sectors = ((transport_dev_end_lba(dev) - lba) + 1);

	return sectors;
}

/*
 * This function can be used by HW target mode drivers to create a linked
 * scatterlist from all contiguously allocated struct se_task->task_sg[].
 * This is intended to be called during the completion path by TCM Core
 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
 */
void transport_do_task_sg_chain(struct se_cmd *cmd)
{
	struct scatterlist *sg_first = NULL;
	struct scatterlist *sg_prev = NULL;
	int sg_prev_nents = 0;
	struct scatterlist *sg;
	struct se_task *task;
	u32 chained_nents = 0;
	int i;

	BUG_ON(!cmd->se_tfo->task_sg_chaining);
	/*
	 * Walk the struct se_task list and setup scatterlist chains
	 * for each contiguously allocated struct se_task->task_sg[].
	 */
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (!task->task_sg)
			continue;

		if (!sg_first) {
			sg_first = task->task_sg;
			chained_nents = task->task_sg_nents;
		} else {
			sg_chain(sg_prev, sg_prev_nents, task->task_sg);
			chained_nents += task->task_sg_nents;
		}
		/*
		 * For the padded tasks, use the extra SGL vector allocated
		 * in transport_allocate_data_tasks() for the sg_prev_nents
		 * offset into sg_chain() above.
		 *
		 * We do not need the padding for the last task (or a single
		 * task), but in that case we will never use the sg_prev_nents
		 * value below which would be incorrect.
		 */
		sg_prev_nents = (task->task_sg_nents + 1);
		sg_prev = task->task_sg;
	}
	/*
	 * Setup the starting pointer and total t_tasks_sg_linked_no including
	 * padding SGs for linking and to mark the end.
	 */
	cmd->t_tasks_sg_chained = sg_first;
	cmd->t_tasks_sg_chained_no = chained_nents;

	pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
		" t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
		cmd->t_tasks_sg_chained_no);

	for_each_sg(cmd->t_tasks_sg_chained, sg,
			cmd->t_tasks_sg_chained_no, i) {

		pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
			i, sg, sg_page(sg), sg->length, sg->offset);
		if (sg_is_chain(sg))
			pr_debug("SG: %p sg_is_chain=1\n", sg);
		if (sg_is_last(sg))
			pr_debug("SG: %p sg_is_last=1\n", sg);
	}
}
EXPORT_SYMBOL(transport_do_task_sg_chain);

/*
 * Break up cmd into chunks transport can handle
 */
static int
transport_allocate_data_tasks(struct se_cmd *cmd,
	enum dma_data_direction data_direction,
	struct scatterlist *cmd_sg, unsigned int sgl_nents)
{
	struct se_device *dev = cmd->se_dev;
	int task_count, i;
	unsigned long long lba;
	sector_t sectors, dev_max_sectors;
	u32 sector_size;

	if (transport_cmd_get_valid_sectors(cmd) < 0)
		return -EINVAL;

	dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
	sector_size = dev->se_sub_dev->se_dev_attrib.block_size;

	WARN_ON(cmd->data_length % sector_size);

	lba = cmd->t_task_lba;
	sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
	task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);

	/*
	 * If we need just a single task reuse the SG list in the command
	 * and avoid a lot of work.
	 */
	if (task_count == 1) {
		struct se_task *task;
		unsigned long flags;

		task = transport_generic_get_task(cmd, data_direction);
		if (!task)
			return -ENOMEM;

		task->task_sg = cmd_sg;
		task->task_sg_nents = sgl_nents;

		task->task_lba = lba;
		task->task_sectors = sectors;
		task->task_size = task->task_sectors * sector_size;

		spin_lock_irqsave(&cmd->t_state_lock, flags);
		list_add_tail(&task->t_list, &cmd->t_task_list);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		return task_count;
	}

	for (i = 0; i < task_count; i++) {
		struct se_task *task;
		unsigned int task_size, task_sg_nents_padded;
		struct scatterlist *sg;
		unsigned long flags;
		int count;

		task = transport_generic_get_task(cmd, data_direction);
		if (!task)
			return -ENOMEM;

		task->task_lba = lba;
		task->task_sectors = min(sectors, dev_max_sectors);
		task->task_size = task->task_sectors * sector_size;

		/*
		 * This now assumes that passed sg_ents are in PAGE_SIZE chunks
		 * in order to calculate the number of per-task SGL entries
		 */
		task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
		/*
		 * Check if the fabric module driver is requesting that all
		 * struct se_task->task_sg[] be chained together.  If so,
		 * then allocate an extra padding SG entry for linking and
		 * marking the end of the chained SGL for every task except
		 * the last one for (task_count > 1) operation, or skipping
		 * the extra padding for the (task_count == 1) case.
		 */
		if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
			task_sg_nents_padded = (task->task_sg_nents + 1);
		} else
			task_sg_nents_padded = task->task_sg_nents;

		task->task_sg = kmalloc(sizeof(struct scatterlist) *
					task_sg_nents_padded, GFP_KERNEL);
		if (!task->task_sg) {
			cmd->se_dev->transport->free_task(task);
			return -ENOMEM;
		}

		sg_init_table(task->task_sg, task_sg_nents_padded);

		task_size = task->task_size;

		/* Build new sgl, only up to task_size */
		for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
			if (cmd_sg->length > task_size)
				break;

			*sg = *cmd_sg;
			task_size -= cmd_sg->length;
			cmd_sg = sg_next(cmd_sg);
		}

		lba += task->task_sectors;
		sectors -= task->task_sectors;

		spin_lock_irqsave(&cmd->t_state_lock, flags);
		list_add_tail(&task->t_list, &cmd->t_task_list);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	return task_count;
}
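
/*
 * Control CDBs are never split: wrap the command's existing data SGL in a
 * single task and queue it on t_task_list.
 */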
static int
transport_allocate_control_task(struct se_cmd *cmd)
{
	struct se_task *task;
	unsigned long flags;

	task = transport_generic_get_task(cmd, cmd->data_direction);
	if (!task)
		return -ENOMEM;

	task->task_sg = cmd->t_data_sg;
	task->task_size = cmd->data_length;
	task->task_sg_nents = cmd->t_data_nents;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_add_tail(&task->t_list, &cmd->t_task_list);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/* Success! Return number of tasks allocated */
	return 1;
}

/*
 * Allocate any required resources to execute the command, and either place
 * it on the execution queue, or, for writes where the payload has not yet
 * arrived, notify the fabric via a call to ->write_pending instead.
 */
int transport_generic_new_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	int task_cdbs, task_cdbs_bidi = 0;
	int set_counts = 1;
	int ret = 0;

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {
		ret = transport_generic_get_mem(cmd);
		if (ret < 0)
			return ret;
	}

	/*
	 * For BIDI commands, set up the read tasks first.
	 */
	if (cmd->t_bidi_data_sg &&
	    dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));

		task_cdbs_bidi = transport_allocate_data_tasks(cmd,
				DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
				cmd->t_bidi_data_nents);
		if (task_cdbs_bidi <= 0)
			goto out_fail;

		atomic_inc(&cmd->t_fe_count);
		atomic_inc(&cmd->t_se_count);
		set_counts = 0;
	}

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
		task_cdbs = transport_allocate_data_tasks(cmd,
					cmd->data_direction, cmd->t_data_sg,
					cmd->t_data_nents);
	} else {
		task_cdbs = transport_allocate_control_task(cmd);
	}

	if (task_cdbs <= 0)
		goto out_fail;

	if (set_counts) {
		atomic_inc(&cmd->t_fe_count);
		atomic_inc(&cmd->t_se_count);
	}

	cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
	atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
	atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);

	/*
	 * For WRITEs, let the fabric know its buffer is ready.
	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
	 * will be added to the struct se_device execution queue after its WRITE
	 * data has arrived. (ie: It gets handled by the transport processing
	 * thread a second time)
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		transport_add_tasks_to_state_queue(cmd);
		return transport_generic_write_pending(cmd);
	}

	/*
	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
	 * to the execution queue.
	 */
	transport_execute_tasks(cmd);
	return 0;

out_fail:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	return -EINVAL;
}
EXPORT_SYMBOL(transport_generic_new_cmd);

/* transport_generic_process_write():
 *
 * Called once all expected WRITE data has been received from the fabric,
 * to move the command's tasks onto the execution queue.
 */
void transport_generic_process_write(struct se_cmd *cmd)
{
	transport_execute_tasks(cmd);
}
EXPORT_SYMBOL(transport_generic_process_write);
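
/*
 * QUEUE_FULL retry path for ->write_pending(): if the fabric still cannot
 * accept the callback, the command is parked on the queue-full list again.
 */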
static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev);
	}
}

static int transport_generic_write_pending(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->t_state = TRANSPORT_WRITE_PENDING;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Clear the se_cmd for WRITE_PENDING status in order to set
	 * cmd->t_transport_active=0 so that transport_generic_handle_data
	 * can be called from HW target mode interrupt code. This is safe
	 * to be called with transport_off=1 before the cmd->se_tfo->write_pending
	 * because the se_cmd->se_lun pointer is not being cleared.
	 */
	transport_cmd_check_stop(cmd, 1, 0);

	/*
	 * Call the fabric write_pending function here to let the
	 * frontend know that WRITE buffers are ready.
	 */
	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;
	else if (ret < 0)
		return ret;

	return PYX_TRANSPORT_WRITE_PENDING;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}

/**
 * transport_release_cmd - free a command
 * @cmd: command to free
 *
 * This routine unconditionally frees a command, and reference counting
 * or list removal must be done in the caller.
 */
void transport_release_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);

	if (cmd->se_tmr_req)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
	/*
	 * Check if target_wait_for_sess_cmds() is expecting to
	 * release se_cmd directly here..
	 */
	if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
		if (cmd->se_tfo->check_release_cmd(cmd) != 0)
			return;

	cmd->se_tfo->release_cmd(cmd);
}
EXPORT_SYMBOL(transport_release_cmd);

void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && cmd->se_tmr_req)
			transport_wait_for_tasks(cmd);

		transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);

		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		transport_free_dev_tasks(cmd);

		transport_put_cmd(cmd);
	}
}
EXPORT_SYMBOL(transport_generic_free_cmd);

/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to add
 */
void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
	se_cmd->check_release = 1;
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_get_sess_cmd);

/* target_put_sess_cmd - Check for active I/O shutdown or list delete
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to drop
 */
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (list_empty(&se_cmd->se_cmd_list)) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		WARN_ON(1);
		return 0;
	}

	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		complete(&se_cmd->cmd_wait_comp);
		return 1;
	}
	list_del(&se_cmd->se_cmd_list);
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(target_put_sess_cmd);

/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
 * @se_sess:	session to split
 */
void target_splice_sess_cmd_list(struct se_session *se_sess)
{
	struct se_cmd *se_cmd;
	unsigned long flags;

	WARN_ON(!list_empty(&se_sess->sess_wait_list));
	INIT_LIST_HEAD(&se_sess->sess_wait_list);

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	se_sess->sess_tearing_down = 1;

	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
		se_cmd->cmd_wait_set = 1;

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_splice_sess_cmd_list);

/* target_wait_for_sess_cmds - Wait for outstanding descriptors
 * @se_sess:	session to wait for active I/O
 * @wait_for_tasks:	Make extra transport_wait_for_tasks call
 */
void target_wait_for_sess_cmds(
	struct se_session *se_sess,
	int wait_for_tasks)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	bool rc = false;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
			&se_sess->sess_wait_list, se_cmd_list) {
		list_del(&se_cmd->se_cmd_list);

		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			" %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		if (wait_for_tasks) {
			pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));

			rc = transport_wait_for_tasks(se_cmd);

			pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		if (!rc) {
			wait_for_completion(&se_cmd->cmd_wait_comp);
			pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		se_cmd->se_tfo->release_cmd(se_cmd);
	}
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);

/* transport_lun_wait_for_tasks():
 *
 * Called from ConfigFS context to stop the passed struct se_cmd to allow
 * a struct se_lun to be successfully shut down.
 */
static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
{
	unsigned long flags;
	int ret;
	/*
	 * If the frontend has already requested this struct se_cmd to
	 * be stopped, we can safely ignore this struct se_cmd.
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (atomic_read(&cmd->t_transport_stop)) {
		atomic_set(&cmd->transport_lun_stop, 0);
		pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
			" TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		transport_cmd_check_stop(cmd, 1, 0);
		return -EPERM;
	}
	atomic_set(&cmd->transport_lun_fe_stop, 1);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	ret = transport_stop_tasks_for_cmd(cmd);

	pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
			" %d\n", cmd, cmd->t_task_list_num, ret);
	if (!ret) {
		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
		wait_for_completion(&cmd->transport_lun_stop_comp);
		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
	}
	transport_remove_cmd_from_queue(cmd);

	return 0;
}
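
/*
 * Walk every command still referencing this LUN, stop it, tear down its
 * device state, and answer the initiator with CHECK_CONDITION so the LUN
 * can be shut down safely.
 */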
static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct se_cmd *cmd = NULL;
	unsigned long lun_flags, cmd_flags;
	/*
	 * Do exception processing and return CHECK_CONDITION status to the
	 * Initiator Port.
	 */
	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	while (!list_empty(&lun->lun_cmd_list)) {
		cmd = list_first_entry(&lun->lun_cmd_list,
		       struct se_cmd, se_lun_node);
		list_del(&cmd->se_lun_node);

		atomic_set(&cmd->transport_lun_active, 0);
		/*
		 * This will notify iscsi_target_transport.c:
		 * transport_cmd_check_stop() that a LUN shutdown is in
		 * progress for the iscsi_cmd_t.
		 */
		spin_lock(&cmd->t_state_lock);
		pr_debug("SE_LUN[%d] - Setting cmd->transport"
			"_lun_stop for ITT: 0x%08x\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));
		atomic_set(&cmd->transport_lun_stop, 1);
		spin_unlock(&cmd->t_state_lock);

		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

		if (!cmd->se_lun) {
			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		/*
		 * If the Storage engine still owns the iscsi_cmd_t, determine
		 * and/or stop its context.
		 */
		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}

		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
			"_wait_for_tasks(): SUCCESS\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (!atomic_read(&cmd->transport_dev_active)) {
			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
			goto check_cond;
		}
		atomic_set(&cmd->transport_dev_active, 0);
		transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);

		transport_free_dev_tasks(cmd);
		/*
		 * The Storage engine stopped this struct se_cmd before it was
		 * sent to the fabric frontend for delivery back to the
		 * Initiator Node. Return this SCSI CDB back with a
		 * CHECK_CONDITION status.
		 */
check_cond:
		transport_send_check_condition_and_sense(cmd,
				TCM_NON_EXISTENT_LUN, 0);
		/*
		 * If the fabric frontend is waiting for this iscsi_cmd_t to
		 * be released, notify the waiting thread now that LU has
		 * finished accessing it.
		 */
		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (atomic_read(&cmd->transport_lun_fe_stop)) {
			pr_debug("SE_LUN[%d] - Detected FE stop for"
				" struct se_cmd: %p ITT: 0x%08x\n",
				lun->unpacked_lun,
				cmd, cmd->se_tfo->get_task_tag(cmd));

			spin_unlock_irqrestore(&cmd->t_state_lock,
					cmd_flags);
			transport_cmd_check_stop(cmd, 1, 0);
			complete(&cmd->transport_lun_fe_stop_comp);
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}
		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}

static int transport_clear_lun_thread(void *p)
{
	struct se_lun *lun = (struct se_lun *)p;

	__transport_clear_lun_from_sessions(lun);
	complete(&lun->lun_shutdown_comp);

	return 0;
}

int transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct task_struct *kt;

	kt = kthread_run(transport_clear_lun_thread, lun,
			"tcm_cl_%u", lun->unpacked_lun);
	if (IS_ERR(kt)) {
		pr_err("Unable to start clear_lun thread\n");
		return PTR_ERR(kt);
	}
	wait_for_completion(&lun->lun_shutdown_comp);

	return 0;
}

/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd:	command to wait on
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
	 * has been set in transport_set_supported_SAM_opcode().
	 */
	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * If we are already stopped due to an external event (ie: LUN shutdown)
	 * sleep until the connection can have the passed struct se_cmd back.
	 * The cmd->transport_lun_stopped_sem will be upped by
	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
	 * has completed its operation on the struct se_cmd.
	 */
	if (atomic_read(&cmd->transport_lun_stop)) {

		pr_debug("wait_for_tasks: Stopping"
			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
			"_stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));
		/*
		 * There is a special case for WRITES where a FE exception +
		 * LUN shutdown means ConfigFS context is still sleeping on
		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
		 * We go ahead and up transport_lun_stop_comp just to be sure
		 * here.
		 */
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->transport_lun_stop_comp);
		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
		spin_lock_irqsave(&cmd->t_state_lock, flags);

		transport_all_task_dev_remove_state(cmd);
		/*
		 * At this point, the frontend who was the originator of this
		 * struct se_cmd, now owns the structure and can be released through
		 * normal means below.
		 */
		pr_debug("wait_for_tasks: Stopped"
			" wait_for_completion(&cmd->t_tasktransport_lun_fe_"
			"stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));

		atomic_set(&cmd->transport_lun_stop, 0);
	}
	if (!atomic_read(&cmd->t_transport_active) ||
	     atomic_read(&cmd->t_transport_aborted)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	atomic_set(&cmd->t_transport_stop, 1);

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
		" i_state: %d, t_state: %d, t_transport_stop = TRUE\n",
		cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	atomic_set(&cmd->t_transport_active, 0);
	atomic_set(&cmd->t_transport_stop, 0);

	pr_debug("wait_for_tasks: Stopped wait_for_completion("
		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
		cmd->se_tfo->get_task_tag(cmd));

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);
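
/*
 * Accessors for the Additional Sense Code / Qualifier pair carried in the
 * command, used when building CHECK_CONDITION sense payloads below.
 */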
static int transport_get_sense_codes(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	*asc = cmd->scsi_asc;
	*ascq = cmd->scsi_ascq;

	return 0;
}

static int transport_set_sense_codes(
	struct se_cmd *cmd,
	u8 asc,
	u8 ascq)
{
	cmd->scsi_asc = asc;
	cmd->scsi_ascq = ascq;

	return 0;
}

int transport_send_check_condition_and_sense(
	struct se_cmd *cmd,
	u8 reason,
	int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	int offset;
	u8 asc = 0, ascq = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	/*
	 * Data Segment and SenseLength of the fabric response PDU.
	 *
	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
	 * from include/scsi/scsi_cmnd.h
	 */
	offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);
	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2  SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 */
	switch (reason) {
	case TCM_NON_EXISTENT_LUN:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT NOT SUPPORTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
		break;
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* DATA PROTECT */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* UNIT ATTENTION */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* Not Ready */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret = 0;

	if (atomic_read(&cmd->t_transport_aborted) != 0) {
		if (!send_status ||
		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
			return 1;
#if 0
		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
			" status for CDB: 0x%02x ITT: 0x%08x\n",
			cmd->t_task_cdb[0],
			cmd->se_tfo->get_task_tag(cmd));
#endif
		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
		cmd->se_tfo->queue_status(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);
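
/*
 * Queue a TASK_ABORTED status back to the fabric. For WRITEs that still
 * expect incoming data, the response is deferred and later delivered via
 * transport_check_aborted_status().
 */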
void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			atomic_inc(&cmd->t_transport_aborted);
			smp_mb__after_atomic_inc();
			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
			transport_new_cmd_failure(cmd);
			return;
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
#if 0
	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
		cmd->se_tfo->get_task_tag(cmd));
#endif
	cmd->se_tfo->queue_status(cmd);
}

/* transport_generic_do_tmr():
 *
 *	Dispatch a task management request against the backend device and
 *	queue the TMR response back to the fabric module.
 */
int transport_generic_do_tmr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
	return 0;
}
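
/*
 * Illustrative note: TMRs normally reach transport_generic_do_tmr() via
 * the per-device processing thread below, after a fabric module has
 * queued the struct se_cmd with t_state set to TRANSPORT_PROCESS_TMR
 * (see the TRANSPORT_PROCESS_TMR case in transport_processing_thread()).
 */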

/* transport_processing_thread():
 *
 *	Per-device kthread that executes queued tasks and dispatches
 *	queued struct se_cmd descriptors based on their t_state.
 */
static int transport_processing_thread(void *param)
{
	int ret;
	struct se_cmd *cmd;
	struct se_device *dev = (struct se_device *) param;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
				kthread_should_stop());
		if (ret < 0)
			goto out;

get_cmd:
		__transport_execute_tasks(dev);

		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
		if (!cmd)
			continue;

		switch (cmd->t_state) {
		case TRANSPORT_NEW_CMD:
			BUG();
			break;
		case TRANSPORT_NEW_CMD_MAP:
			if (!cmd->se_tfo->new_cmd_map) {
				pr_err("cmd->se_tfo->new_cmd_map is"
					" NULL for TRANSPORT_NEW_CMD_MAP\n");
				BUG();
			}
			ret = cmd->se_tfo->new_cmd_map(cmd);
			if (ret < 0) {
				cmd->transport_error_status = ret;
				transport_generic_request_failure(cmd,
						0, (cmd->data_direction !=
						    DMA_TO_DEVICE));
				break;
			}
			ret = transport_generic_new_cmd(cmd);
			if (ret < 0) {
				cmd->transport_error_status = ret;
				transport_generic_request_failure(cmd,
						0, (cmd->data_direction !=
						    DMA_TO_DEVICE));
			}
			break;
		case TRANSPORT_PROCESS_WRITE:
			transport_generic_process_write(cmd);
			break;
		case TRANSPORT_PROCESS_TMR:
			transport_generic_do_tmr(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_WP:
			transport_write_pending_qf(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_OK:
			transport_complete_qf(cmd);
			break;
		default:
			pr_err("Unknown t_state: %d for ITT: 0x%08x "
				"i_state: %d on SE LUN: %u\n",
				cmd->t_state,
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd),
				cmd->se_lun->unpacked_lun);
			BUG();
		}
		goto get_cmd;
	}

out:
	WARN_ON(!list_empty(&dev->state_task_list));
	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
	dev->process_thread = NULL;
	return 0;
}