tcm_loop.c

/*******************************************************************************
 *
 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
 * for emulated SAS initiator ports
 *
 * © Copyright 2011 RisingTide Systems LLC.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_fabric_lib.h>
#include <target/target_core_configfs.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_tmr.h>

#include "tcm_loop.h"
#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_loop_fabric_configfs;

static struct kmem_cache *tcm_loop_cmd_cache;

static int tcm_loop_hba_no_cnt;

/*
 * Allocate a tcm_loop cmd descriptor from target_core_mod code
 *
 * Can be called from interrupt context in tcm_loop_queuecommand() below
 */
static struct se_cmd *tcm_loop_allocate_core_cmd(
	struct tcm_loop_hba *tl_hba,
	struct se_portal_group *se_tpg,
	struct scsi_cmnd *sc)
{
	struct se_cmd *se_cmd;
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus;
	struct tcm_loop_cmd *tl_cmd;
	int sam_task_attr;

	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		return NULL;
	}
	se_sess = tl_nexus->se_sess;

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		pr_err("Unable to allocate struct tcm_loop_cmd\n");
		set_host_byte(sc, DID_ERROR);
		return NULL;
	}
	se_cmd = &tl_cmd->tl_se_cmd;
	/*
	 * Save the pointer to struct scsi_cmnd *sc
	 */
	tl_cmd->sc = sc;
	/*
	 * Locate the SAM Task Attr from struct scsi_cmnd *
	 */
	if (sc->device->tagged_supported) {
		switch (sc->tag) {
		case HEAD_OF_QUEUE_TAG:
			sam_task_attr = MSG_HEAD_TAG;
			break;
		case ORDERED_QUEUE_TAG:
			sam_task_attr = MSG_ORDERED_TAG;
			break;
		default:
			sam_task_attr = MSG_SIMPLE_TAG;
			break;
		}
	} else
		sam_task_attr = MSG_SIMPLE_TAG;

	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
			&tl_cmd->tl_sense_buf[0]);

	/*
	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
	 */
	if (scsi_bidi_cmnd(sc))
		se_cmd->t_tasks_bidi = 1;
	/*
	 * Locate the struct se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
		set_host_byte(sc, DID_NO_CONNECT);
		return NULL;
	}

	return se_cmd;
}
/*
 * Called by struct target_core_fabric_ops->new_cmd_map()
 *
 * Always called in process context. A non zero return value
 * here will signal to handle an exception based on the return code.
 */
static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int ret;
	/*
	 * Allocate the necessary tasks to complete the received CDB+data
	 */
	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
	if (ret == -ENOMEM) {
		/* Out of Resources */
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	} else if (ret == -EINVAL) {
		/*
		 * Handle case for SAM_STAT_RESERVATION_CONFLICT
		 */
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			return PYX_TRANSPORT_RESERVATION_CONFLICT;
		/*
		 * Otherwise, return SAM_STAT_CHECK_CONDITION and return
		 * sense data.
		 */
		return PYX_TRANSPORT_USE_SENSE_REASON;
	}

	/*
	 * For BIDI commands, pass in the extra READ buffer
	 * to transport_generic_map_mem_to_cmd() below..
	 */
	if (se_cmd->t_tasks_bidi) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
	}
	/*
	 * Because some userspace code via scsi-generic does not memset its
	 * associated read buffers, go ahead and do that here for type
	 * SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently
	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
	 * by target core in transport_generic_allocate_tasks() ->
	 * transport_generic_cmd_sequencer().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
	    se_cmd->data_direction == DMA_FROM_DEVICE) {
		struct scatterlist *sg = scsi_sglist(sc);
		unsigned char *buf = kmap(sg_page(sg)) + sg->offset;

		if (buf != NULL) {
			memset(buf, 0, sg->length);
			kunmap(sg_page(sg));
		}
	}

	/* Tell the core about our preallocated memory */
	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
	if (ret < 0)
		return PYX_TRANSPORT_LU_COMM_FAILURE;

	return 0;
}
/*
 * Called from struct target_core_fabric_ops->check_stop_free()
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
	/*
	 * Do not release struct se_cmd's containing a valid TMR
	 * pointer. These will be released directly in tcm_loop_device_reset()
	 * with transport_generic_free_cmd().
	 */
	if (se_cmd->se_tmr_req)
		return 0;
	/*
	 * Release the struct se_cmd, which will make a callback to release
	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
	 */
	transport_generic_free_cmd(se_cmd, 0);
	return 1;
}

static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}

static int tcm_loop_proc_info(struct Scsi_Host *host, char *buffer,
				char **start, off_t offset,
				int length, int inout)
{
	return sprintf(buffer, "tcm_loop_proc_info()\n");
}

static int tcm_loop_driver_probe(struct device *);
static int tcm_loop_driver_remove(struct device *);

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type tcm_loop_lld_bus = {
	.name = "tcm_loop_bus",
	.match = pseudo_lld_bus_match,
	.probe = tcm_loop_driver_probe,
	.remove = tcm_loop_driver_remove,
};

static struct device_driver tcm_loop_driverfs = {
	.name = "tcm_loop",
	.bus = &tcm_loop_lld_bus,
};
/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
struct device *tcm_loop_primary;

/*
 * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and
 * drivers/scsi/libiscsi.c:iscsi_change_queue_depth()
 */
static int tcm_loop_change_queue_depth(
	struct scsi_device *sdev,
	int depth,
	int reason)
{
	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
		break;
	case SCSI_QDEPTH_QFULL:
		scsi_track_queue_full(sdev, depth);
		break;
	case SCSI_QDEPTH_RAMP_UP:
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return sdev->queue_depth;
}

/*
 * Main entry point from struct scsi_host_template for incoming SCSI CDB+Data
 * from Linux/SCSI subsystem for SCSI low level device drivers (LLDs)
 */
static int tcm_loop_queuecommand(
	struct Scsi_Host *sh,
	struct scsi_cmnd *sc)
{
	struct se_cmd *se_cmd;
	struct se_portal_group *se_tpg;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
		" scsi_buf_len: %u\n", sc->device->host->host_no,
		sc->device->id, sc->device->channel, sc->device->lun,
		sc->cmnd[0], scsi_bufflen(sc));
	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		sc->scsi_done(sc);
		return 0;
	}
	se_tpg = &tl_tpg->tl_se_tpg;
	/*
	 * Determine the SAM Task Attribute and allocate tl_cmd and
	 * tl_cmd->tl_se_cmd from TCM infrastructure
	 */
	se_cmd = tcm_loop_allocate_core_cmd(tl_hba, se_tpg, sc);
	if (!se_cmd) {
		sc->scsi_done(sc);
		return 0;
	}
	/*
	 * Queue up the newly allocated se_cmd to be processed in TCM thread context.
	 */
	transport_generic_handle_cdb_map(se_cmd);
	return 0;
}
/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
	struct se_cmd *se_cmd = NULL;
	struct se_portal_group *se_tpg;
	struct se_session *se_sess;
	struct tcm_loop_cmd *tl_cmd = NULL;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_tmr *tl_tmr = NULL;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;
	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to perform device reset without"
				" active I_T Nexus\n");
		return FAILED;
	}
	se_sess = tl_nexus->se_sess;
	/*
	 * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	se_tpg = &tl_tpg->tl_se_tpg;

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd) {
		pr_err("Unable to allocate memory for tl_cmd\n");
		return FAILED;
	}

	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
	if (!tl_tmr) {
		pr_err("Unable to allocate memory for tl_tmr\n");
		goto release;
	}
	init_waitqueue_head(&tl_tmr->tl_tmr_wait);

	se_cmd = &tl_cmd->tl_se_cmd;
	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
				DMA_NONE, MSG_SIMPLE_TAG,
				&tl_cmd->tl_sense_buf[0]);
	/*
	 * Allocate the LUN_RESET TMR
	 */
	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
				TMR_LUN_RESET, GFP_KERNEL);
	if (IS_ERR(se_cmd->se_tmr_req))
		goto release;
	/*
	 * Locate the underlying TCM struct se_lun from sc->device->lun
	 */
	if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
		goto release;
	/*
	 * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
	 * to wake us up.
	 */
	transport_generic_handle_tmr(se_cmd);
	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
	/*
	 * The TMR LUN_RESET has completed, check the response status and
	 * then release allocations.
	 */
	ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
		SUCCESS : FAILED;
release:
	if (se_cmd)
		transport_generic_free_cmd(se_cmd, 1);
	else
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	kfree(tl_tmr);
	return ret;
}
static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
	return 0;
}

static int tcm_loop_slave_configure(struct scsi_device *sd)
{
	return 0;
}

static struct scsi_host_template tcm_loop_driver_template = {
	.proc_info = tcm_loop_proc_info,
	.proc_name = "tcm_loopback",
	.name = "TCM_Loopback",
	.queuecommand = tcm_loop_queuecommand,
	.change_queue_depth = tcm_loop_change_queue_depth,
	.eh_device_reset_handler = tcm_loop_device_reset,
	.can_queue = TL_SCSI_CAN_QUEUE,
	.this_id = -1,
	.sg_tablesize = TL_SCSI_SG_TABLESIZE,
	.cmd_per_lun = TL_SCSI_CMD_PER_LUN,
	.max_sectors = TL_SCSI_MAX_SECTORS,
	.use_clustering = DISABLE_CLUSTERING,
	.slave_alloc = tcm_loop_slave_alloc,
	.slave_configure = tcm_loop_slave_configure,
	.module = THIS_MODULE,
};

static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			sizeof(struct tcm_loop_hba));
	if (!sh) {
		pr_err("Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Setup single ID, Channel and LUN for now..
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}

static int tcm_loop_driver_remove(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;

	tl_hba = to_tcm_loop_hba(dev);
	sh = tl_hba->sh;

	scsi_remove_host(sh);
	scsi_host_put(sh);
	return 0;
}

static void tcm_loop_release_adapter(struct device *dev)
{
	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);

	kfree(tl_hba);
}
/*
 * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c
 */
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
	int ret;

	tl_hba->dev.bus = &tcm_loop_lld_bus;
	tl_hba->dev.parent = tcm_loop_primary;
	tl_hba->dev.release = &tcm_loop_release_adapter;
	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);

	ret = device_register(&tl_hba->dev);
	if (ret) {
		pr_err("device_register() failed for"
				" tl_hba->dev: %d\n", ret);
		return -ENODEV;
	}

	return 0;
}

/*
 * Called from tcm_loop_fabric_init() below to load the emulated
 * tcm_loop SCSI bus.
 */
static int tcm_loop_alloc_core_bus(void)
{
	int ret;

	tcm_loop_primary = root_device_register("tcm_loop_0");
	if (IS_ERR(tcm_loop_primary)) {
		pr_err("Unable to allocate tcm_loop_primary\n");
		return PTR_ERR(tcm_loop_primary);
	}

	ret = bus_register(&tcm_loop_lld_bus);
	if (ret) {
		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
		goto dev_unreg;
	}

	ret = driver_register(&tcm_loop_driverfs);
	if (ret) {
		pr_err("driver_register() failed for"
				" tcm_loop_driverfs\n");
		goto bus_unreg;
	}

	pr_debug("Initialized TCM Loop Core Bus\n");
	return ret;

bus_unreg:
	bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
	root_device_unregister(tcm_loop_primary);
	return ret;
}

static void tcm_loop_release_core_bus(void)
{
	driver_unregister(&tcm_loop_driverfs);
	bus_unregister(&tcm_loop_lld_bus);
	root_device_unregister(tcm_loop_primary);

	pr_debug("Releasing TCM Loop Core BUS\n");
}
static char *tcm_loop_get_fabric_name(void)
{
	return "loopback";
}

static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg =
		(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	/*
	 * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
	 * time based on the protocol dependent prefix of the passed configfs group.
	 *
	 * Based upon tl_proto_id, TCM_Loop emulates the requested fabric
	 * ProtocolID using target_core_fabric_lib.c symbols.
	 */
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_FCP:
		return fc_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_fabric_proto_ident(se_tpg);
	default:
		pr_err("Unknown tl_proto_id: 0x%02x, using"
			" SAS emulation\n", tl_hba->tl_proto_id);
		break;
	}

	return sas_get_fabric_proto_ident(se_tpg);
}

static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg =
		(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
	/*
	 * Return the passed NAA identifier for the SAS Target Port
	 */
	return &tl_tpg->tl_hba->tl_wwn_address[0];
}

static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg =
		(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
	/*
	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
	 * to represent the SCSI Target Port.
	 */
	return tl_tpg->tl_tpgt;
}

static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}
static u32 tcm_loop_get_pr_transport_id(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code,
	unsigned char *buf)
{
	struct tcm_loop_tpg *tl_tpg =
		(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	default:
		pr_err("Unknown tl_proto_id: 0x%02x, using"
			" SAS emulation\n", tl_hba->tl_proto_id);
		break;
	}

	return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
			format_code, buf);
}

static u32 tcm_loop_get_pr_transport_id_len(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code)
{
	struct tcm_loop_tpg *tl_tpg =
		(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	default:
		pr_err("Unknown tl_proto_id: 0x%02x, using"
			" SAS emulation\n", tl_hba->tl_proto_id);
		break;
	}

	return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
			format_code);
}

/*
 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
 */
static char *tcm_loop_parse_pr_out_transport_id(
	struct se_portal_group *se_tpg,
	const char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	struct tcm_loop_tpg *tl_tpg =
		(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_FCP:
		return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	default:
		pr_err("Unknown tl_proto_id: 0x%02x, using"
			" SAS emulation\n", tl_hba->tl_proto_id);
		break;
	}

	return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
			port_nexus_ptr);
}
/*
 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
 * based upon the incoming fabric dependent SCSI Initiator Port
 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 */
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 * It has been added here as a nop for target_fabric_tf_ops_check()
 */
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
	struct se_portal_group *se_tpg)
{
	struct tcm_loop_nacl *tl_nacl;

	tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
	if (!tl_nacl) {
		pr_err("Unable to allocate struct tcm_loop_nacl\n");
		return NULL;
	}

	return &tl_nacl->se_node_acl;
}

static void tcm_loop_tpg_release_fabric_acl(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl)
{
	struct tcm_loop_nacl *tl_nacl = container_of(se_nacl,
				struct tcm_loop_nacl, se_node_acl);

	kfree(tl_nacl);
}

static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_loop_is_state_remove(struct se_cmd *se_cmd)
{
	/*
	 * Assume struct scsi_cmnd is not in remove state..
	 */
	return 0;
}

static int tcm_loop_sess_logged_in(struct se_session *se_sess)
{
	/*
	 * Assume that TL Nexus is always active
	 */
	return 1;
}

static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
	return 1;
}

static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
	return;
}

static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
{
	return 1;
}

static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
			struct tcm_loop_cmd, tl_se_cmd);

	return tl_cmd->sc_cmd_state;
}

static int tcm_loop_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void tcm_loop_close_session(struct se_session *se_sess)
{
	return;
}

static void tcm_loop_stop_session(
	struct se_session *se_sess,
	int sess_sleep,
	int conn_sleep)
{
	return;
}

static void tcm_loop_fall_back_to_erl0(struct se_session *se_sess)
{
	return;
}
static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
	 * format with transport_generic_map_mem_to_cmd().
	 *
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	transport_generic_process_write(se_cmd);
	return 0;
}

static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
			" cdb: 0x%02x\n", sc, sc->cmnd[0]);

	sc->result = SAM_STAT_GOOD;
	set_host_byte(sc, DID_OK);
	sc->scsi_done(sc);
	return 0;
}

static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
			" cdb: 0x%02x\n", sc, sc->cmnd[0]);

	if (se_cmd->sense_buffer &&
	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
	} else
		sc->result = se_cmd->scsi_status;

	set_host_byte(sc, DID_OK);
	sc->scsi_done(sc);
	return 0;
}

static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
	/*
	 * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead
	 * and wake up the wait_queue_head_t in tcm_loop_device_reset()
	 */
	atomic_set(&tl_tmr->tmr_complete, 1);
	wake_up(&tl_tmr->tl_tmr_wait);
	return 0;
}

static u16 tcm_loop_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
{
	return 0;
}

static u16 tcm_loop_get_fabric_sense_len(void)
{
	return 0;
}
static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

/* Start items for tcm_loop_port_cit */

static int tcm_loop_port_link(
	struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	atomic_inc(&tl_tpg->tl_tpg_port_count);
	smp_mb__after_atomic_inc();
	/*
	 * Add Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	return 0;
}

static void tcm_loop_port_unlink(
	struct se_portal_group *se_tpg,
	struct se_lun *se_lun)
{
	struct scsi_device *sd;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
	tl_hba = tl_tpg->tl_hba;

	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
				se_lun->unpacked_lun);
	if (!sd) {
		pr_err("Unable to locate struct scsi_device for %d:%d:"
			"%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
		return;
	}
	/*
	 * Remove Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_remove_device(sd);
	scsi_device_put(sd);

	atomic_dec(&tl_tpg->tl_tpg_port_count);
	smp_mb__after_atomic_dec();

	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}

/* End items for tcm_loop_port_cit */
/* Start items for tcm_loop_nexus_cit */

static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
{
	struct se_portal_group *se_tpg;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	int ret = -ENOMEM;

	if (tl_tpg->tl_hba->tl_nexus) {
		pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
		return -EEXIST;
	}
	se_tpg = &tl_tpg->tl_se_tpg;

	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
	if (!tl_nexus) {
		pr_err("Unable to allocate struct tcm_loop_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Initialize the struct se_session pointer
	 */
	tl_nexus->se_sess = transport_init_session();
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
		goto out;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
	 * Initiator port name of the passed configfs group 'name'.
	 */
	tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
				se_tpg, (unsigned char *)name);
	if (!tl_nexus->se_sess->se_node_acl) {
		transport_free_session(tl_nexus->se_sess);
		goto out;
	}
	/*
	 * Now, register the SAS I_T Nexus as active with the call to
	 * transport_register_session()
	 */
	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
			tl_nexus->se_sess, tl_nexus);
	tl_tpg->tl_hba->tl_nexus = tl_nexus;
	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
		name);
	return 0;

out:
	kfree(tl_nexus);
	return ret;
}

static int tcm_loop_drop_nexus(
	struct tcm_loop_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba = tpg->tl_hba;

	tl_nexus = tpg->tl_hba->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	se_sess = tl_nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	if (atomic_read(&tpg->tl_tpg_port_count)) {
		pr_err("Unable to remove TCM_Loop I_T Nexus with"
			" active TPG port count: %d\n",
			atomic_read(&tpg->tl_tpg_port_count));
		return -EPERM;
	}

	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
		tl_nexus->se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated SAS Target Port
	 */
	transport_deregister_session(tl_nexus->se_sess);
	tpg->tl_hba->tl_nexus = NULL;
	kfree(tl_nexus);
	return 0;
}

/* End items for tcm_loop_nexus_cit */
static ssize_t tcm_loop_tpg_show_nexus(
	struct se_portal_group *se_tpg,
	char *page)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_nexus *tl_nexus;
	ssize_t ret;

	tl_nexus = tl_tpg->tl_hba->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	ret = snprintf(page, PAGE_SIZE, "%s\n",
		tl_nexus->se_sess->se_node_acl->initiatorname);

	return ret;
}

static ssize_t tcm_loop_tpg_store_nexus(
	struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds"
				" max: %d\n", page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
	&tcm_loop_tpg_nexus.attr,
	NULL,
};
/* Start items for tcm_loop_naa_cit */

struct se_portal_group *tcm_loop_make_naa_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
			struct tcm_loop_hba, tl_hba_wwn);
	struct tcm_loop_tpg *tl_tpg;
	char *tpgt_str, *end_ptr;
	int ret;
	unsigned short int tpgt;

	tpgt_str = strstr(name, "tpgt_");
	if (!tpgt_str) {
		pr_err("Unable to locate \"tpgt_#\" directory"
				" group\n");
		return ERR_PTR(-EINVAL);
	}
	tpgt_str += 5; /* Skip ahead of "tpgt_" */
	tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);

	if (tpgt >= TL_TPGS_PER_HBA) {
		pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
				" %u\n", tpgt, TL_TPGS_PER_HBA);
		return ERR_PTR(-EINVAL);
	}
	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
	tl_tpg->tl_hba = tl_hba;
	tl_tpg->tl_tpgt = tpgt;
	/*
	 * Register the tl_tpg as an emulated SAS TCM Target Endpoint
	 */
	ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
			wwn, &tl_tpg->tl_se_tpg, tl_tpg,
			TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0)
		return ERR_PTR(-ENOMEM);

	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
		config_item_name(&wwn->wwn_group.cg_item), tpgt);

	return &tl_tpg->tl_se_tpg;
}

void tcm_loop_drop_naa_tpg(
	struct se_portal_group *se_tpg)
{
	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba;
	unsigned short tpgt;

	tl_hba = tl_tpg->tl_hba;
	tpgt = tl_tpg->tl_tpgt;
	/*
	 * Release the I_T Nexus for the Virtual SAS link if present
	 */
	tcm_loop_drop_nexus(tl_tpg);
	/*
	 * Deregister the tl_tpg as an emulated SAS TCM Target Endpoint
	 */
	core_tpg_deregister(se_tpg);

	tl_tpg->tl_hba = NULL;
	tl_tpg->tl_tpgt = 0;

	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
		config_item_name(&wwn->wwn_group.cg_item), tpgt);
}

/* End items for tcm_loop_naa_cit */
/* Start items for tcm_loop_cit */

struct se_wwn *tcm_loop_make_scsi_hba(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	char *ptr;
	int ret, off = 0;

	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
	if (!tl_hba) {
		pr_err("Unable to allocate struct tcm_loop_hba\n");
		return ERR_PTR(-ENOMEM);
	}
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (!ptr) {
		pr_err("Unable to locate prefix for emulated Target "
				"Port: %s\n", name);
		ret = -EINVAL;
		goto out;
	}
	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds"
			" max: %d\n", tcm_loop_dump_proto_id(tl_hba), name,
			TL_WWN_ADDR_LEN);
		ret = -EINVAL;
		goto out;
	}
	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

	/*
	 * Call device_register(tl_hba->dev) to register the emulated
	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
	 * device_register() callbacks in tcm_loop_driver_probe()
	 */
	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
	if (ret)
		goto out;

	sh = tl_hba->sh;
	tcm_loop_hba_no_cnt++;
	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
		" %s Address: %s at Linux/SCSI Host ID: %d\n",
		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);

	return &tl_hba->tl_hba_wwn;
out:
	kfree(tl_hba);
	return ERR_PTR(ret);
}

void tcm_loop_drop_scsi_hba(
	struct se_wwn *wwn)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);
	int host_no = tl_hba->sh->host_no;
	/*
	 * Call device_unregister() on the original tl_hba->dev.
	 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
	 * release *tl_hba;
	 */
	device_unregister(&tl_hba->dev);

	pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
		" SAS Address: %s at Linux/SCSI Host ID: %d\n",
		config_item_name(&wwn->wwn_group.cg_item), host_no);
}

/* Start items for tcm_loop_cit */

static ssize_t tcm_loop_wwn_show_attr_version(
	struct target_fabric_configfs *tf,
	char *page)
{
	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}

TF_WWN_ATTR_RO(tcm_loop, version);

static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
	&tcm_loop_wwn_version.attr,
	NULL,
};

/* End items for tcm_loop_cit */
static int tcm_loop_register_configfs(void)
{
	struct target_fabric_configfs *fabric;
	struct config_group *tf_cg;
	int ret;
	/*
	 * Set the TCM Loop HBA counter to zero
	 */
	tcm_loop_hba_no_cnt = 0;
	/*
	 * Register the top level struct config_item_type with TCM core
	 */
	fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
	if (IS_ERR(fabric)) {
		pr_err("tcm_loop_register_configfs() failed!\n");
		return PTR_ERR(fabric);
	}
	/*
	 * Setup the fabric API of function pointers used by target_core_mod
	 */
	fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name;
	fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident;
	fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn;
	fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag;
	fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth;
	fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id;
	fabric->tf_ops.tpg_get_pr_transport_id_len =
					&tcm_loop_get_pr_transport_id_len;
	fabric->tf_ops.tpg_parse_pr_out_transport_id =
					&tcm_loop_parse_pr_out_transport_id;
	fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode;
	fabric->tf_ops.tpg_check_demo_mode_cache =
					&tcm_loop_check_demo_mode_cache;
	fabric->tf_ops.tpg_check_demo_mode_write_protect =
					&tcm_loop_check_demo_mode_write_protect;
	fabric->tf_ops.tpg_check_prod_mode_write_protect =
					&tcm_loop_check_prod_mode_write_protect;
	/*
	 * The TCM loopback fabric module runs in demo-mode to a local
	 * virtual SCSI device, so fabric dependent initiator ACLs are
	 * not required.
	 */
	fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl;
	fabric->tf_ops.tpg_release_fabric_acl =
					&tcm_loop_tpg_release_fabric_acl;
	fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
	/*
	 * Used for setting up remaining TCM resources in process context
	 */
	fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map;
	fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
	fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
	fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
	fabric->tf_ops.close_session = &tcm_loop_close_session;
	fabric->tf_ops.stop_session = &tcm_loop_stop_session;
	fabric->tf_ops.fall_back_to_erl0 = &tcm_loop_fall_back_to_erl0;
	fabric->tf_ops.sess_logged_in = &tcm_loop_sess_logged_in;
	fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index;
	fabric->tf_ops.sess_get_initiator_sid = NULL;
	fabric->tf_ops.write_pending = &tcm_loop_write_pending;
	fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status;
	/*
	 * Not used for TCM loopback
	 */
	fabric->tf_ops.set_default_node_attributes =
					&tcm_loop_set_default_node_attributes;
	fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
	fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
	fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
	fabric->tf_ops.queue_status = &tcm_loop_queue_status;
	fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
	fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len;
	fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len;
	fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove;

	tf_cg = &fabric->tf_group;
	/*
	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
	 */
	fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba;
	fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba;
	fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg;
	fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg;
	/*
	 * fabric_post_link() and fabric_pre_unlink() are used for
	 * registration and release of TCM Loop Virtual SCSI LUNs.
	 */
	fabric->tf_ops.fabric_post_link = &tcm_loop_port_link;
	fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink;
	fabric->tf_ops.fabric_make_np = NULL;
	fabric->tf_ops.fabric_drop_np = NULL;
	/*
	 * Setup default attribute lists for various fabric->tf_cit_tmpl
	 */
	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
	/*
	 * Once fabric->tf_ops has been setup, now register the fabric for
	 * use within TCM
	 */
	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() for"
				" TCM_Loop failed!\n");
		target_fabric_configfs_free(fabric);
		return -1;
	}
	/*
	 * Setup our local pointer to *fabric.
	 */
	tcm_loop_fabric_configfs = fabric;
	pr_debug("TCM_LOOP[0] - Set fabric ->"
			" tcm_loop_fabric_configfs\n");
	return 0;
}

static void tcm_loop_deregister_configfs(void)
{
	if (!tcm_loop_fabric_configfs)
		return;

	target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
	tcm_loop_fabric_configfs = NULL;
	pr_debug("TCM_LOOP[0] - Cleared"
			" tcm_loop_fabric_configfs\n");
}
static int __init tcm_loop_fabric_init(void)
{
	int ret;

	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
				sizeof(struct tcm_loop_cmd),
				__alignof__(struct tcm_loop_cmd),
				0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for"
			" tcm_loop_cmd_cache failed\n");
		return -ENOMEM;
	}

	ret = tcm_loop_alloc_core_bus();
	if (ret)
		return ret;

	ret = tcm_loop_register_configfs();
	if (ret) {
		tcm_loop_release_core_bus();
		return ret;
	}

	return 0;
}

static void __exit tcm_loop_fabric_exit(void)
{
	tcm_loop_deregister_configfs();
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
}

MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);
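
Two pointer-recovery idioms recur throughout the driver above: tcm_loop_driver_probe() stores its struct tcm_loop_hba backpointer in the opaque Scsi_Host hostdata area and tcm_loop_queuecommand()/tcm_loop_device_reset() read it back with a double-pointer cast via shost_priv(), while to_tcm_loop_hba() recovers the hba from its embedded struct device with container_of(). The standalone userspace sketch below only illustrates those two idioms under simplified assumptions; the demo_* types, the local demo_container_of macro, and main() are hypothetical and are not part of the driver or of any kernel API.

/*
 * Standalone illustration (userspace C) of the two private-data patterns
 * used above: storing a backpointer in an opaque per-object buffer (as with
 * Scsi_Host->hostdata) and recovering a containing structure from an
 * embedded member (as with to_tcm_loop_hba()). All demo_* names are
 * hypothetical; only the pattern mirrors the driver.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_device {
	char name[32];
};

struct demo_hba {
	int host_id;
	struct demo_device dev;		/* embedded, like tcm_loop_hba->dev */
};

struct demo_host {
	/* opaque private area, standing in for Scsi_Host->hostdata[] */
	unsigned char hostdata[sizeof(struct demo_hba *)];
};

int main(void)
{
	struct demo_hba *hba = calloc(1, sizeof(*hba));
	struct demo_host *host = calloc(1, sizeof(*host));

	hba->host_id = 7;
	strcpy(hba->dev.name, "demo_adapter_0");

	/* Store the backpointer in the opaque area... */
	*((struct demo_hba **)host->hostdata) = hba;
	/* ...and read it back with the same double-pointer cast the driver uses. */
	struct demo_hba *from_host = *(struct demo_hba **)host->hostdata;

	/* Recover the hba from its embedded device, as to_tcm_loop_hba() does. */
	struct demo_hba *from_dev =
		demo_container_of(&hba->dev, struct demo_hba, dev);

	printf("host_id=%d name=%s same=%d\n", from_host->host_id,
		from_dev->dev.name, from_host == from_dev);

	free(host);
	free(hba);
	return 0;
}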