scsi.c
/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>
#include <linux/percpu_ida.h>

#include "vhost.h"

#define TCM_VHOST_VERSION  "v0.1"
#define TCM_VHOST_NAMELEN 256
#define TCM_VHOST_MAX_CDB_SIZE 32
#define TCM_VHOST_DEFAULT_TAGS 256
#define TCM_VHOST_PREALLOC_SGLS 2048
#define TCM_VHOST_PREALLOC_PAGES 2048

struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};

struct tcm_vhost_cmd {
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* virtio-scsi initiator task attribute */
	int tvc_task_attr;
	/* virtio-scsi initiator data direction */
	enum dma_data_direction tvc_data_direction;
	/* Expected data transfer length from virtio-scsi header */
	u32 tvc_exp_data_len;
	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
	u64 tvc_tag;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
	/* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
	u32 tvc_lun;
	/* Pointer to the SGL formatted memory from virtio-scsi */
	struct scatterlist *tvc_sgl;
	struct page **tvc_upages;
	/* Pointer to response */
	struct virtio_scsi_cmd_resp __user *tvc_resp;
	/* Pointer to vhost_scsi for our device */
	struct vhost_scsi *tvc_vhost;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* Pointer to vhost nexus memory */
	struct tcm_vhost_nexus *tvc_nexus;
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
	/* work item used for cmwq dispatch to tcm_vhost_submission_work() */
	struct work_struct work;
	/* Copy of the incoming SCSI command descriptor block (CDB) */
	unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};

struct tcm_vhost_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

struct tcm_vhost_nacl {
	/* Binary World Wide unique Port Name for Vhost Initiator port */
	u64 iport_wwpn;
	/* ASCII formatted WWPN for SAS Initiator port */
	char iport_name[TCM_VHOST_NAMELEN];
	/* Returned by tcm_vhost_make_nodeacl() */
	struct se_node_acl se_node_acl;
};

struct tcm_vhost_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* list for tcm_vhost_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct tcm_vhost_nexus *tpg_nexus;
	/* Pointer back to tcm_vhost_tport */
	struct tcm_vhost_tport *tport;
	/* Returned by tcm_vhost_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
};

struct tcm_vhost_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[TCM_VHOST_NAMELEN];
	/* Returned by tcm_vhost_make_tport() */
	struct se_wwn tport_wwn;
};

struct tcm_vhost_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};

enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};

enum {
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG)
};

#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_VQ	128
#define VHOST_SCSI_MAX_EVENT	128

struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * each time, one reference tracks new commands submitted, while we
	 * wait for another one to reach 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
};

struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct tcm_vhost_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

	struct vhost_work vs_completion_work; /* cmd completion work item */
	struct llist_head vs_completion_list; /* cmd completion queue */

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_vhost_fabric_configfs;

static struct workqueue_struct *tcm_vhost_workqueue;

/* Global mutex to protect tcm_vhost TPG list for vhost IOCTL access */
static DEFINE_MUTEX(tcm_vhost_mutex);
static LIST_HEAD(tcm_vhost_list);
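
/*
 * Number of pages spanned by an iovec: round the end of the range up and
 * the start down to page boundaries, then count the pages in between. An
 * 8-byte iovec whose base sits 4 bytes before a page boundary therefore
 * counts as two pages.
 */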
static int iov_num_pages(struct iovec *iov)
{
	return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
	       ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}

static void tcm_vhost_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}
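
/*
 * Swap each virtqueue over to a fresh inflight counter under vq->mutex,
 * optionally handing the old counters back via old_inflight[] so that a
 * flush can wait for the requests still charged against them to drain.
 */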
static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
				    struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* set up new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}

static struct vhost_scsi_inflight *
tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, tcm_vhost_done_inflight);
}

static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *tcm_vhost_get_fabric_name(void)
{
	return "vhost";
}

static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_FCP:
		return fc_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_fabric_proto_ident(se_tpg);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_fabric_proto_ident(se_tpg);
}

static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32
tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
			      struct se_node_acl *se_nacl,
			      struct t10_pr_registration *pr_reg,
			      int *format_code,
			      unsigned char *buf)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
			format_code, buf);
}

static u32
tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
				  struct se_node_acl *se_nacl,
				  struct t10_pr_registration *pr_reg,
				  int *format_code)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
			format_code);
}

static char *
tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
				    const char *buf,
				    u32 *out_tid_len,
				    char **port_nexus_ptr)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_FCP:
		return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
			port_nexus_ptr);
}

static struct se_node_acl *
tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_nacl *nacl;

	nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
	if (!nacl) {
		pr_err("Unable to allocate struct tcm_vhost_nacl\n");
		return NULL;
	}

	return &nacl->se_node_acl;
}

static void
tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
			     struct se_node_acl *se_nacl)
{
	struct tcm_vhost_nacl *nacl = container_of(se_nacl,
			struct tcm_vhost_nacl, se_node_acl);
	kfree(nacl);
}

static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);
	struct se_session *se_sess = se_cmd->se_sess;

	if (tv_cmd->tvc_sgl_count) {
		u32 i;
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
	}

	tcm_vhost_put_inflight(tv_cmd->inflight);
	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}

static int tcm_vhost_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void tcm_vhost_close_session(struct se_session *se_sess)
{
	return;
}

static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
{
	return 0;
}

static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
{
	struct vhost_scsi *vs = cmd->tvc_vhost;

	llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);

	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(cmd);
	return 0;
}

static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(cmd);
	return 0;
}

static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
{
	return;
}

static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

static struct tcm_vhost_evt *
tcm_vhost_allocate_evt(struct vhost_scsi *vs,
		       u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct tcm_vhost_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = event;
	evt->event.reason = reason;
	vs->vs_events_nr++;

	return evt;
}

static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;

	/* TODO locking against target/backend threads? */
	transport_generic_free_cmd(se_cmd, 0);
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}
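
/*
 * Deliver one pending event to the guest: grab an unused descriptor from
 * the event virtqueue, copy the virtio_scsi_event into it, and signal the
 * guest. If no descriptor of the right size is available, record the
 * event as missed so it can be reported later.
 */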
static void
tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vq->private_data) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
}

static void tcm_vhost_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct tcm_vhost_evt *evt;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	while (llnode) {
		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
		llnode = llist_next(llnode);
		tcm_vhost_do_evt_work(vs, evt);
		tcm_vhost_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
	struct virtio_scsi_cmd_resp v_rsp;
	struct tcm_vhost_cmd *cmd;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	int ret, vq;

	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
	llnode = llist_del_all(&vs->vs_completion_list);
	while (llnode) {
		cmd = llist_entry(llnode, struct tcm_vhost_cmd,
				tvc_completion_list);
		llnode = llist_next(llnode);
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			cmd, se_cmd->residual_count, se_cmd->scsi_status);

		memset(&v_rsp, 0, sizeof(v_rsp));
		v_rsp.resid = se_cmd->residual_count;
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
		v_rsp.sense_len = se_cmd->scsi_sense_length;
		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
		       v_rsp.sense_len);
		ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
		if (likely(ret == 0)) {
			struct vhost_scsi_virtqueue *q;
			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
			vq = q - vs->vqs;
			__set_bit(vq, signal);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_free_cmd(cmd);
	}

	vq = -1;
	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
		< VHOST_SCSI_MAX_VQ)
		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}
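
/*
 * Allocate a command slot from the session's preallocated tag pool. The
 * slot is recycled with memset(), so the pointers to its preallocated
 * scatterlist and user-page arrays are saved and restored across the wipe.
 */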
static struct tcm_vhost_cmd *
vhost_scsi_get_tag(struct vhost_virtqueue *vq,
		   struct tcm_vhost_tpg *tpg,
		   struct virtio_scsi_cmd_req *v_req,
		   u32 exp_data_len,
		   int data_direction)
{
	struct tcm_vhost_cmd *cmd;
	struct tcm_vhost_nexus *tv_nexus;
	struct se_session *se_sess;
	struct scatterlist *sg;
	struct page **pages;
	int tag;

	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct tcm_vhost_nexus\n");
		return ERR_PTR(-EIO);
	}
	se_sess = tv_nexus->tvn_se_sess;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
	if (tag < 0) {
		pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
	sg = cmd->tvc_sgl;
	pages = cmd->tvc_upages;

	memset(cmd, 0, sizeof(struct tcm_vhost_cmd));
	cmd->tvc_sgl = sg;
	cmd->tvc_upages = pages;
	cmd->tvc_se_cmd.map_tag = tag;
	cmd->tvc_tag = v_req->tag;
	cmd->tvc_task_attr = v_req->task_attr;
	cmd->tvc_exp_data_len = exp_data_len;
	cmd->tvc_data_direction = data_direction;
	cmd->tvc_nexus = tv_nexus;
	cmd->inflight = tcm_vhost_get_inflight(vq);

	return cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
		      struct scatterlist *sgl,
		      unsigned int sgl_count,
		      struct iovec *iov,
		      int write)
{
	unsigned int npages = 0, pages_nr, offset, nbytes;
	struct scatterlist *sg = sgl;
	void __user *ptr = iov->iov_base;
	size_t len = iov->iov_len;
	struct page **pages;
	int ret, i;

	if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
		pr_err("vhost_scsi_map_to_sgl() sgl_count: %u greater than"
		       " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
			sgl_count, TCM_VHOST_PREALLOC_SGLS);
		return -ENOBUFS;
	}

	pages_nr = iov_num_pages(iov);
	if (pages_nr > sgl_count)
		return -ENOBUFS;

	if (pages_nr > TCM_VHOST_PREALLOC_PAGES) {
		pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
		       " preallocated TCM_VHOST_PREALLOC_PAGES: %u\n",
			pages_nr, TCM_VHOST_PREALLOC_PAGES);
		return -ENOBUFS;
	}

	pages = tv_cmd->tvc_upages;

	ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
	/* No pages were pinned */
	if (ret < 0)
		goto out;
	/* Fewer pages pinned than requested */
	if (ret != pages_nr) {
		for (i = 0; i < ret; i++)
			put_page(pages[i]);
		ret = -EFAULT;
		goto out;
	}

	while (len > 0) {
		offset = (uintptr_t)ptr & ~PAGE_MASK;
		nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
		sg_set_page(sg, pages[npages], nbytes, offset);
		ptr += nbytes;
		len -= nbytes;
		sg++;
		npages++;
	}

out:
	return ret;
}
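
/*
 * Pin the guest pages behind each data iovec and build the command's
 * scatterlist from them. On any failure, every page pinned so far is
 * released and the partially built scatterlist is discarded.
 */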
static int
vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
			  struct iovec *iov,
			  unsigned int niov,
			  int write)
{
	int ret;
	unsigned int i;
	u32 sgl_count;
	struct scatterlist *sg;

	/*
	 * Find out how long sglist needs to be
	 */
	sgl_count = 0;
	for (i = 0; i < niov; i++)
		sgl_count += iov_num_pages(&iov[i]);

	/* TODO overflow checking */

	sg = cmd->tvc_sgl;
	pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
	sg_init_table(sg, sgl_count);

	cmd->tvc_sgl_count = sgl_count;

	pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);

	for (i = 0; i < niov; i++) {
		ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
					    write);
		if (ret < 0) {
			for (i = 0; i < cmd->tvc_sgl_count; i++)
				put_page(sg_page(&cmd->tvc_sgl[i]));

			cmd->tvc_sgl_count = 0;
			return ret;
		}

		sg += ret;
		sgl_count -= ret;
	}
	return 0;
}
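
/*
 * Runs in process context on tcm_vhost_workqueue: hand the command to the
 * TCM core with target_submit_cmd_map_sgls(). On submission failure a
 * CHECK CONDITION with LOGICAL UNIT COMMUNICATION FAILURE sense is sent
 * back to the guest and the command is freed.
 */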
static void tcm_vhost_submission_work(struct work_struct *work)
{
	struct tcm_vhost_cmd *cmd =
		container_of(work, struct tcm_vhost_cmd, work);
	struct tcm_vhost_nexus *tv_nexus;
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
	struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
	int rc, sg_no_bidi = 0;

	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->tvc_sgl;
/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
#if 0
		if (se_cmd->se_cmd_flags & SCF_BIDI) {
			sg_bidi_ptr = NULL;
			sg_no_bidi = 0;
		}
#endif
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = cmd->tvc_nexus;

	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
			cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
			cmd->tvc_lun, cmd->tvc_exp_data_len,
			cmd->tvc_task_attr, cmd->tvc_data_direction,
			TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
			sg_bidi_ptr, sg_no_bidi);
	if (rc < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
	}
}

static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}
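
/*
 * Main request path: pull descriptors off an I/O virtqueue, infer the data
 * direction from the out/in segment counts, validate the request and
 * response buffers, look up the target portal group, build a tcm_vhost_cmd
 * and dispatch it to the submission workqueue.
 */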
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct tcm_vhost_tpg **vs_tpg;
	struct virtio_scsi_cmd_req v_req;
	struct tcm_vhost_tpg *tpg;
	struct tcm_vhost_cmd *cmd;
	u32 exp_data_len, data_first, data_num, data_direction;
	unsigned out, in, i;
	int head, ret;
	u8 target;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	vs_tpg = vq->private_data;
	if (!vs_tpg)
		goto out;

	vhost_disable_notify(&vs->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
					ARRAY_SIZE(vq->iov), &out, &in,
					NULL, NULL);
		pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
					head, out, in);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
				vhost_disable_notify(&vs->dev, vq);
				continue;
			}
			break;
		}

		/* FIXME: BIDI operation */
		if (out == 1 && in == 1) {
			data_direction = DMA_NONE;
			data_first = 0;
			data_num = 0;
		} else if (out == 1 && in > 1) {
			data_direction = DMA_FROM_DEVICE;
			data_first = out + 1;
			data_num = in - 1;
		} else if (out > 1 && in == 1) {
			data_direction = DMA_TO_DEVICE;
			data_first = 1;
			data_num = out - 1;
		} else {
			vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
					out, in);
			break;
		}

		/*
		 * Check for a sane resp buffer so we can report errors to
		 * the guest.
		 */
		if (unlikely(vq->iov[out].iov_len !=
					sizeof(struct virtio_scsi_cmd_resp))) {
			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
				" bytes\n", vq->iov[out].iov_len);
			break;
		}

		if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
			vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
				" bytes\n", vq->iov[0].iov_len);
			break;
		}
		pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
			" len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
		ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
				sizeof(v_req));
		if (unlikely(ret)) {
			vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
			break;
		}

		/* Extract the tpgt */
		target = v_req.lun[1];
		tpg = ACCESS_ONCE(vs_tpg[target]);

		/* Target does not exist, fail the request */
		if (unlikely(!tpg)) {
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}

		exp_data_len = 0;
		for (i = 0; i < data_num; i++)
			exp_data_len += vq->iov[data_first + i].iov_len;

		cmd = vhost_scsi_get_tag(vq, tpg, &v_req,
					 exp_data_len, data_direction);
		if (IS_ERR(cmd)) {
			vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
					PTR_ERR(cmd));
			goto err_cmd;
		}

		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
			": %d\n", cmd, exp_data_len, data_direction);

		cmd->tvc_vhost = vs;
		cmd->tvc_vq = vq;
		cmd->tvc_resp = vq->iov[out].iov_base;

		/*
		 * Copy in the received CDB descriptor into cmd->tvc_cdb
		 * that will be used by tcm_vhost_new_cmd_map() and down into
		 * target_setup_cmd_from_cdb()
		 */
		memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for tcm_vhost
		 */
		/* TODO what if cdb was too small for varlen cdb header? */
		if (unlikely(scsi_command_size(cmd->tvc_cdb) >
					TCM_VHOST_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
				scsi_command_size(cmd->tvc_cdb),
				TCM_VHOST_MAX_CDB_SIZE);
			goto err_free;
		}
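		/*
		 * v_req.lun follows the virtio-scsi single-level LUN format:
		 * byte 1 selects the target, bytes 2-3 carry the LUN with the
		 * address-method bits masked off to 14 bits.
		 */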
		cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			cmd->tvc_cdb[0], cmd->tvc_lun);

		if (data_direction != DMA_NONE) {
			ret = vhost_scsi_map_iov_to_sgl(cmd,
					&vq->iov[data_first], data_num,
					data_direction == DMA_FROM_DEVICE);
			if (unlikely(ret)) {
				vq_err(vq, "Failed to map iov to sgl\n");
				goto err_free;
			}
		}

		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
		 */
		cmd->tvc_vq_desc = head;
		/*
		 * Dispatch tv_cmd descriptor for cmwq execution in process
		 * context provided by tcm_vhost_workqueue.  This also ensures
		 * tv_cmd is executed on the same kworker CPU as this vhost
		 * thread to gain positive L2 cache locality effects.
		 */
		INIT_WORK(&cmd->work, tcm_vhost_submission_work);
		queue_work(tcm_vhost_workqueue, &cmd->work);
	}

	mutex_unlock(&vq->mutex);
	return;

err_free:
	vhost_scsi_free_cmd(cmd);
err_cmd:
	vhost_scsi_send_bad_target(vs, vq, head, out);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	pr_debug("%s: The handling func for control queue.\n", __func__);
}
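
/*
 * Queue an event for delivery to the guest. The caller holds the event
 * vq mutex; the LUN is encoded per the virtio-scsi spec and the actual
 * copy into the vring is deferred to the vhost worker thread.
 */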
static void
tcm_vhost_send_evt(struct vhost_scsi *vs,
		   struct tcm_vhost_tpg *tpg,
		   struct se_lun *lun,
		   u32 event,
		   u32 reason)
{
	struct tcm_vhost_evt *evt;

	evt = tcm_vhost_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	vhost_work_queue(&vs->dev, &vs->vs_event_work);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
					poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vq->private_data)
		goto out;

	if (vs->vs_events_missed)
		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	vhost_scsi_handle_vq(vs, vq);
}

static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
	vhost_poll_flush(&vs->vqs[index].vq.poll);
}

/* Callers must hold dev mutex */
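/*
 * Flush protocol: tcm_vhost_init_inflight() swaps in a fresh inflight
 * counter per vq; dropping the initial reference on each old counter means
 * its completion fires once every request started against it has released
 * its reference via tcm_vhost_put_inflight().
 */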
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
	int i;

	/* Init new inflight and remember the old inflight */
	tcm_vhost_init_inflight(vs, old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);

	/* Flush both the vhost poll and vhost work */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		vhost_scsi_flush_vq(vs, i);
	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
	vhost_work_flush(&vs->dev, &vs->vs_event_work);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		wait_for_completion(&old_inflight[i]->comp);
}

/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
 *
 * The lock nesting rule is:
 *   tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
			struct vhost_scsi_target *t)
{
	struct tcm_vhost_tport *tv_tport;
	struct tcm_vhost_tpg *tpg;
	struct tcm_vhost_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&tcm_vhost_mutex);
	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto out;
		}
	}

	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
	vs_tpg = kzalloc(len, GFP_KERNEL);
	if (!vs_tpg) {
		ret = -ENOMEM;
		goto out;
	}
	if (vs->vs_tpg)
		memcpy(vs_tpg, vs->vs_tpg, len);

	list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
		mutex_lock(&tpg->tv_tpg_mutex);
		if (!tpg->tpg_nexus) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		if (tpg->tv_tpg_vhost_count != 0) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		tv_tport = tpg->tport;

		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
				kfree(vs_tpg);
				mutex_unlock(&tpg->tv_tpg_mutex);
				ret = -EEXIST;
				goto out;
			}
			tpg->tv_tpg_vhost_count++;
			tpg->vhost_scsi = vs;
			vs_tpg[tpg->tport_tpgt] = tpg;
			smp_mb__after_atomic_inc();
			match = true;
		}
		mutex_unlock(&tpg->tv_tpg_mutex);
	}

	if (match) {
		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
		       sizeof(vs->vs_vhost_wwpn));
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vq->private_data = vs_tpg;
			vhost_init_used(vq);
			mutex_unlock(&vq->mutex);
		}
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = vs_tpg;

out:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return ret;
}

static int
vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
			  struct vhost_scsi_target *t)
{
	struct tcm_vhost_tport *tv_tport;
	struct tcm_vhost_tpg *tpg;
	struct vhost_virtqueue *vq;
	bool match = false;
	int index, ret, i;
	u8 target;

	mutex_lock(&tcm_vhost_mutex);
	mutex_lock(&vs->dev.mutex);
	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto err_dev;
		}
	}

	if (!vs->vs_tpg) {
		ret = 0;
		goto err_dev;
	}

	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tpg = vs->vs_tpg[target];
		if (!tpg)
			continue;

		mutex_lock(&tpg->tv_tpg_mutex);
		tv_tport = tpg->tport;
		if (!tv_tport) {
			ret = -ENODEV;
			goto err_tpg;
		}

		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
				tv_tport->tport_name, tpg->tport_tpgt,
				t->vhost_wwpn, t->vhost_tpgt);
			ret = -EINVAL;
			goto err_tpg;
		}
		tpg->tv_tpg_vhost_count--;
		tpg->vhost_scsi = NULL;
		vs->vs_tpg[target] = NULL;
		match = true;
		mutex_unlock(&tpg->tv_tpg_mutex);
	}
	if (match) {
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vq->private_data = NULL;
			mutex_unlock(&vq->mutex);
		}
	}
	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = NULL;
	WARN_ON(vs->vs_events_nr);
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return 0;

err_tpg:
	mutex_unlock(&tpg->tv_tpg_mutex);
err_dev:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return ret;
}

static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		mutex_unlock(&vs->dev.mutex);
		return -EFAULT;
	}
	vs->dev.acked_features = features;
	smp_wmb();
	vhost_scsi_flush(vs);
	mutex_unlock(&vs->dev.mutex);
	return 0;
}

static void vhost_scsi_free(struct vhost_scsi *vs)
{
	if (is_vmalloc_addr(vs))
		vfree(vs);
	else
		kfree(vs);
}
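
/*
 * struct vhost_scsi embeds VHOST_SCSI_MAX_VQ virtqueues and is too large
 * to reliably come from the slab allocator, so try kzalloc() without
 * warnings first and fall back to vzalloc(); vhost_scsi_free() picks the
 * matching release path with is_vmalloc_addr().
 */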
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs;
	struct vhost_virtqueue **vqs;
	int r = -ENOMEM, i;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!vs) {
		vs = vzalloc(sizeof(*vs));
		if (!vs)
			goto err_vs;
	}

	vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vqs;

	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
	vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);

	vs->vs_events_nr = 0;
	vs->vs_events_missed = false;

	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
		vqs[i] = &vs->vqs[i].vq;
		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
	}
	r = vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);

	tcm_vhost_init_inflight(vs, NULL);

	if (r < 0)
		goto err_init;

	f->private_data = vs;
	return 0;

err_init:
	kfree(vqs);
err_vqs:
	vhost_scsi_free(vs);
err_vs:
	return r;
}

static int vhost_scsi_release(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target t;

	mutex_lock(&vs->dev.mutex);
	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
	mutex_unlock(&vs->dev.mutex);
	vhost_scsi_clear_endpoint(vs, &t);
	vhost_dev_stop(&vs->dev);
	vhost_dev_cleanup(&vs->dev, false);
	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
	vhost_scsi_flush(vs);
	kfree(vs->dev.vqs);
	vhost_scsi_free(vs);
	return 0;
}

static long
vhost_scsi_ioctl(struct file *f,
		 unsigned int ioctl,
		 unsigned long arg)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target backend;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u32 __user *eventsp = argp;
	u32 events_missed;
	u64 features;
	int r, abi_version = VHOST_SCSI_ABI_VERSION;
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

	switch (ioctl) {
	case VHOST_SCSI_SET_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_set_endpoint(vs, &backend);
	case VHOST_SCSI_CLEAR_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_clear_endpoint(vs, &backend);
	case VHOST_SCSI_GET_ABI_VERSION:
		if (copy_to_user(argp, &abi_version, sizeof abi_version))
			return -EFAULT;
		return 0;
	case VHOST_SCSI_SET_EVENTS_MISSED:
		if (get_user(events_missed, eventsp))
			return -EFAULT;
		mutex_lock(&vq->mutex);
		vs->vs_events_missed = events_missed;
		mutex_unlock(&vq->mutex);
		return 0;
	case VHOST_SCSI_GET_EVENTS_MISSED:
		mutex_lock(&vq->mutex);
		events_missed = vs->vs_events_missed;
		mutex_unlock(&vq->mutex);
		if (put_user(events_missed, eventsp))
			return -EFAULT;
		return 0;
	case VHOST_GET_FEATURES:
		features = VHOST_SCSI_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		return vhost_scsi_set_features(vs, features);
	default:
		mutex_lock(&vs->dev.mutex);
		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
		/* TODO: flush backend after dev ioctl. */
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
				    unsigned long arg)
{
	return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_scsi_fops = {
	.owner = THIS_MODULE,
	.release = vhost_scsi_release,
	.unlocked_ioctl = vhost_scsi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vhost_scsi_compat_ioctl,
#endif
	.open = vhost_scsi_open,
	.llseek = noop_llseek,
};

static struct miscdevice vhost_scsi_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-scsi",
	&vhost_scsi_fops,
};

static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}

static int vhost_scsi_deregister(void)
{
	return misc_deregister(&vhost_scsi_misc);
}

static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}
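
/*
 * Report a LUN hotplug or hotunplug to the guest as a
 * VIRTIO_SCSI_T_TRANSPORT_RESET event with a rescan/removed reason, but
 * only when the guest negotiated VIRTIO_SCSI_F_HOTPLUG; otherwise the
 * event is silently dropped.
 */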
static void
tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
		  struct se_lun *lun, bool plug)
{
	struct vhost_scsi *vs = tpg->vhost_scsi;
	struct vhost_virtqueue *vq;
	u32 reason;

	if (!vs)
		return;

	mutex_lock(&vs->dev.mutex);
	if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
		mutex_unlock(&vs->dev.mutex);
		return;
	}

	if (plug)
		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
	else
		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	mutex_lock(&vq->mutex);
	tcm_vhost_send_evt(vs, tpg, lun,
			   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
	mutex_unlock(&vq->mutex);
	mutex_unlock(&vs->dev.mutex);
}

static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	tcm_vhost_do_plug(tpg, lun, true);
}

static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	tcm_vhost_do_plug(tpg, lun, false);
}
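
/*
 * Fabric callbacks invoked by configfs when a LUN is linked into or
 * unlinked from this TPG: bump the port count under the TPG mutex, then
 * notify the guest via the hotplug event path above.
 */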
static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
			       struct se_lun *lun)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	mutex_unlock(&tpg->tv_tpg_mutex);

	tcm_vhost_hotplug(tpg, lun);

	mutex_unlock(&tcm_vhost_mutex);

	return 0;
}

static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
				  struct se_lun *lun)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	mutex_unlock(&tpg->tv_tpg_mutex);

	tcm_vhost_hotunplug(tpg, lun);

	mutex_unlock(&tcm_vhost_mutex);
}

static struct se_node_acl *
tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
		       struct config_group *group,
		       const char *name)
{
	struct se_node_acl *se_nacl, *se_nacl_new;
	struct tcm_vhost_nacl *nacl;
	u64 wwpn = 0;
	u32 nexus_depth;

	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */
	se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
	if (!se_nacl_new)
		return ERR_PTR(-ENOMEM);

	nexus_depth = 1;
	/*
	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
	 * when converting a NodeACL from demo mode -> explicit
	 */
	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
						  name, nexus_depth);
	if (IS_ERR(se_nacl)) {
		tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
		return se_nacl;
	}
	/*
	 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
	 */
	nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
	nacl->iport_wwpn = wwpn;

	return se_nacl;
}

static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
{
	struct tcm_vhost_nacl *nacl = container_of(se_acl,
				struct tcm_vhost_nacl, se_node_acl);
	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
	kfree(nacl);
}
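
/*
 * Free the scatterlist and page arrays preallocated for each command
 * descriptor in the session tag map.  Called on nexus teardown and on the
 * error path of tcm_vhost_make_nexus().
 */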
static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
				       struct se_session *se_sess)
{
	struct tcm_vhost_cmd *tv_cmd;
	unsigned int i;

	if (!se_sess->sess_cmd_map)
		return;

	for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
		tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];

		kfree(tv_cmd->tvc_sgl);
		kfree(tv_cmd->tvc_upages);
	}
}
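
/*
 * Create the virtual I_T nexus for this TPG: allocate the nexus, set up a
 * tag-backed se_session, preallocate per-command SGL and page arrays for
 * every tag, and register the session with the target core.
 */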
static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
				const char *name)
{
	struct se_portal_group *se_tpg;
	struct se_session *se_sess;
	struct tcm_vhost_nexus *tv_nexus;
	struct tcm_vhost_cmd *tv_cmd;
	unsigned int i;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}
	se_tpg = &tpg->se_tpg;

	tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct tcm_vhost_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Initialize the struct se_session pointer and setup tagpool
	 * for struct tcm_vhost_cmd descriptors
	 */
	tv_nexus->tvn_se_sess = transport_init_session_tags(
					TCM_VHOST_DEFAULT_TAGS,
					sizeof(struct tcm_vhost_cmd));
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	se_sess = tv_nexus->tvn_se_sess;
	for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
		tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];

		tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
					TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL);
		if (!tv_cmd->tvc_sgl) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
			goto out;
		}

		tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
					TCM_VHOST_PREALLOC_PAGES, GFP_KERNEL);
		if (!tv_cmd->tvc_upages) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
			goto out;
		}
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the tcm_vhost struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
				se_tpg, (unsigned char *)name);
	if (!tv_nexus->tvn_se_sess->se_node_acl) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("core_tpg_check_initiator_node_acl() failed"
				" for %s\n", name);
		goto out;
	}
	/*
	 * Now register the TCM vhost virtual I_T Nexus as active with the
	 * call to __transport_register_session()
	 */
	__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
			tv_nexus->tvn_se_sess, tv_nexus);
	tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tpg->tv_tpg_mutex);
	return 0;

out:
	tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
	transport_free_session(se_sess);
	kfree(tv_nexus);
	return -ENOMEM;
}

static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_vhost_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG port count: %d\n",
			tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG vhost count: %d\n",
			tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);

	tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	transport_deregister_session(tv_nexus->tvn_se_sess);

	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}
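
/*
 * configfs "nexus" attribute: show prints the active initiator WWN, while
 * store either creates a new nexus from the written WWN or, when "NULL" is
 * written, tears the existing one down.
 */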
static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
					char *page)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = snprintf(page, PAGE_SIZE, "%s\n",
			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return ret;
}

static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
					 const char *page,
					 size_t count)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport_wwn = tpg->tport;
	unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed.
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_vhost_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_vhost_make_tport(), and call
	 * tcm_vhost_make_nexus().
	 */
	if (strlen(page) >= TCM_VHOST_NAMELEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds"
				" max: %d\n", page, TCM_VHOST_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_vhost_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}
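
/*
 * Typical usage from userspace (illustrative only; the WWN is an example
 * and the path assumes the standard target configfs mount point):
 *
 *   echo naa.600140554cf3a18e > \
 *     /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
 *
 * writes through the store handler above, while reading the same file goes
 * through the show handler.
 */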

TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
	&tcm_vhost_tpg_nexus.attr,
	NULL,
};
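
/*
 * Create a TPG from a configfs mkdir of the form "tpgt_<n>"; the numeric
 * suffix becomes the target port group tag.
 */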
static struct se_portal_group *
tcm_vhost_make_tpg(struct se_wwn *wwn,
		   struct config_group *group,
		   const char *name)
{
	struct tcm_vhost_tport *tport = container_of(wwn,
			struct tcm_vhost_tport, tport_wwn);
	struct tcm_vhost_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct tcm_vhost_tpg\n");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0) {
		kfree(tpg);
		return ERR_PTR(ret);
	}
	mutex_lock(&tcm_vhost_mutex);
	list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
	mutex_unlock(&tcm_vhost_mutex);

	return &tpg->se_tpg;
}

static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&tcm_vhost_mutex);
	/*
	 * Release the virtual I_T Nexus for this vhost TPG
	 */
	tcm_vhost_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM.
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}
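
/*
 * Create the emulated target port from a configfs mkdir; the directory
 * name's prefix ("naa.", "fc." or "iqn.") selects the emulated SCSI
 * protocol identifier.
 */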
static struct se_wwn *
tcm_vhost_make_tport(struct target_fabric_configfs *tf,
		     struct config_group *group,
		     const char *name)
{
	struct tcm_vhost_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */

	tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct tcm_vhost_tport\n");
		return ERR_PTR(-ENOMEM);
	}
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
			" %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= TCM_VHOST_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds"
			" max: %d\n", tcm_vhost_dump_proto_id(tport), name,
			TCM_VHOST_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}

static void tcm_vhost_drop_tport(struct se_wwn *wwn)
{
	struct tcm_vhost_tport *tport = container_of(wwn,
				struct tcm_vhost_tport, tport_wwn);

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
		tport->tport_name);

	kfree(tport);
}

static ssize_t
tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
				char *page)
{
	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
		utsname()->machine);
}

TF_WWN_ATTR_RO(tcm_vhost, version);

static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
	&tcm_vhost_wwn_version.attr,
	NULL,
};
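
/*
 * Fabric operations vector handed to the target core; the make/drop
 * entries at the bottom wire the configfs handlers defined above into
 * target_core_fabric_configfs.c.
 */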
static struct target_core_fabric_ops tcm_vhost_ops = {
	.get_fabric_name = tcm_vhost_get_fabric_name,
	.get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident,
	.tpg_get_wwn = tcm_vhost_get_fabric_wwn,
	.tpg_get_tag = tcm_vhost_get_tag,
	.tpg_get_default_depth = tcm_vhost_get_default_depth,
	.tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id,
	.tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id,
	.tpg_check_demo_mode = tcm_vhost_check_true,
	.tpg_check_demo_mode_cache = tcm_vhost_check_true,
	.tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
	.tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
	.tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl,
	.tpg_release_fabric_acl = tcm_vhost_release_fabric_acl,
	.tpg_get_inst_index = tcm_vhost_tpg_get_inst_index,
	.release_cmd = tcm_vhost_release_cmd,
	.check_stop_free = vhost_scsi_check_stop_free,
	.shutdown_session = tcm_vhost_shutdown_session,
	.close_session = tcm_vhost_close_session,
	.sess_get_index = tcm_vhost_sess_get_index,
	.sess_get_initiator_sid = NULL,
	.write_pending = tcm_vhost_write_pending,
	.write_pending_status = tcm_vhost_write_pending_status,
	.set_default_node_attributes = tcm_vhost_set_default_node_attrs,
	.get_task_tag = tcm_vhost_get_task_tag,
	.get_cmd_state = tcm_vhost_get_cmd_state,
	.queue_data_in = tcm_vhost_queue_data_in,
	.queue_status = tcm_vhost_queue_status,
	.queue_tm_rsp = tcm_vhost_queue_tm_rsp,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn = tcm_vhost_make_tport,
	.fabric_drop_wwn = tcm_vhost_drop_tport,
	.fabric_make_tpg = tcm_vhost_make_tpg,
	.fabric_drop_tpg = tcm_vhost_drop_tpg,
	.fabric_post_link = tcm_vhost_port_link,
	.fabric_pre_unlink = tcm_vhost_port_unlink,
	.fabric_make_np = NULL,
	.fabric_drop_np = NULL,
	.fabric_make_nodeacl = tcm_vhost_make_nodeacl,
	.fabric_drop_nodeacl = tcm_vhost_drop_nodeacl,
};

static int tcm_vhost_register_configfs(void)
{
	struct target_fabric_configfs *fabric;
	int ret;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
		utsname()->machine);
	/*
	 * Register the top level struct config_item_type with TCM core
	 */
	fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
	if (IS_ERR(fabric)) {
		pr_err("target_fabric_configfs_init() failed\n");
		return PTR_ERR(fabric);
	}
	/*
	 * Setup fabric->tf_ops from our local tcm_vhost_ops
	 */
	fabric->tf_ops = tcm_vhost_ops;
	/*
	 * Setup default attribute lists for various fabric->tf_cit_tmpl
	 */
	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
	/*
	 * Register the fabric for use within TCM
	 */
	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() failed"
				" for TCM_VHOST\n");
		target_fabric_configfs_free(fabric);
		return ret;
	}
	/*
	 * Setup our local pointer to *fabric
	 */
	tcm_vhost_fabric_configfs = fabric;
	pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
	return 0;
}

static void tcm_vhost_deregister_configfs(void)
{
	if (!tcm_vhost_fabric_configfs)
		return;

	target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
	tcm_vhost_fabric_configfs = NULL;
	pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
}

static int __init tcm_vhost_init(void)
{
	int ret = -ENOMEM;
	/*
	 * Use our own dedicated workqueue for submitting I/O into
	 * target core to avoid contention within system_wq.
	 */
	tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
	if (!tcm_vhost_workqueue)
		goto out;

	ret = vhost_scsi_register();
	if (ret < 0)
		goto out_destroy_workqueue;

	ret = tcm_vhost_register_configfs();
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out_destroy_workqueue:
	destroy_workqueue(tcm_vhost_workqueue);
out:
	return ret;
}
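
/*
 * Module teardown runs in the reverse order of tcm_vhost_init(): drop the
 * configfs fabric first so no new endpoints can be configured, then the
 * /dev/vhost-scsi misc device, then the workqueue.
 */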
static void tcm_vhost_exit(void)
{
	tcm_vhost_deregister_configfs();
	vhost_scsi_deregister();
	destroy_workqueue(tcm_vhost_workqueue);
}

MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(tcm_vhost_init);
module_exit(tcm_vhost_exit);