
/*******************************************************************************
 * Filename:  target_core_pscsi.c
 *
 * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/genhd.h>
#include <linux/cdrom.h>
#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/libsas.h> /* For TASK_ATTR_* */

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>

#include "target_core_pscsi.h"

#define ISPRINT(a)  ((a >= ' ') && (a <= '~'))

static struct se_subsystem_api pscsi_template;

static void pscsi_req_done(struct request *, int);

/* pscsi_get_sh():
 *
 * Look up a referenced struct Scsi_Host for the passed SCSI Host ID via
 * scsi_host_lookup(), or return NULL if it cannot be located.
 */
static struct Scsi_Host *pscsi_get_sh(u32 host_no)
{
	struct Scsi_Host *sh = NULL;

	sh = scsi_host_lookup(host_no);
	if (IS_ERR(sh)) {
		printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:"
				" %u\n", host_no);
		return NULL;
	}
	return sh;
}

/* pscsi_attach_hba():
 *
 * pscsi_get_sh() uses scsi_host_lookup() to locate struct Scsi_Host
 * from the passed SCSI Host ID.
 */
static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
{
	int hba_depth;
	struct pscsi_hba_virt *phv;

	phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
	if (!(phv)) {
		printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
		return -1;
	}
	phv->phv_host_id = host_id;
	phv->phv_mode = PHV_VIRUTAL_HOST_ID;
	hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
	atomic_set(&hba->left_queue_depth, hba_depth);
	atomic_set(&hba->max_queue_depth, hba_depth);
	hba->hba_ptr = (void *)phv;

	printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
	printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic"
		" Target Core with TCQ Depth: %d\n", hba->hba_id,
		atomic_read(&hba->max_queue_depth));

	return 0;
}

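/* pscsi_detach_hba():
 *
 * Release the struct Scsi_Host reference held in passthrough mode, if any,
 * and free the struct pscsi_hba_virt allocated by pscsi_attach_hba().
 */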
static void pscsi_detach_hba(struct se_hba *hba)
{
	struct pscsi_hba_virt *phv = hba->hba_ptr;
	struct Scsi_Host *scsi_host = phv->phv_lld_host;

	if (scsi_host) {
		scsi_host_put(scsi_host);

		printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from"
			" Generic Target Core\n", hba->hba_id,
			(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
			"Unknown");
	} else
		printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA"
			" from Generic Target Core\n", hba->hba_id);

	kfree(phv);
	hba->hba_ptr = NULL;
}

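/* pscsi_pmode_enable_hba():
 *
 * mode_flag == 0 releases the struct Scsi_Host reference and returns the HBA
 * to PHV_VIRUTAL_HOST_ID mode; a non-zero mode_flag looks up the Scsi_Host
 * for phv_host_id and enables PHV_LLD_SCSI_HOST_NO passthrough mode.
 */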
static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
{
	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
	struct Scsi_Host *sh = phv->phv_lld_host;
	int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
	/*
	 * Release the struct Scsi_Host
	 */
	if (!(mode_flag)) {
		if (!(sh))
			return 0;

		phv->phv_lld_host = NULL;
		phv->phv_mode = PHV_VIRUTAL_HOST_ID;
		atomic_set(&hba->left_queue_depth, hba_depth);
		atomic_set(&hba->max_queue_depth, hba_depth);

		printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
			" %s\n", hba->hba_id, (sh->hostt->name) ?
			(sh->hostt->name) : "Unknown");

		scsi_host_put(sh);
		return 0;
	}
	/*
	 * Otherwise, locate struct Scsi_Host from the original passed
	 * pSCSI Host ID and enable for phba mode
	 */
	sh = pscsi_get_sh(phv->phv_host_id);
	if (!(sh)) {
		printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
			" phv_host_id: %d\n", phv->phv_host_id);
		return -1;
	}
	/*
	 * Usually the SCSI LLD will use the hostt->can_queue value to define
	 * its HBA TCQ depth.  Some other drivers (like 2.6 megaraid) don't set
	 * this at all and set sh->can_queue at runtime.
	 */
	hba_depth = (sh->hostt->can_queue > sh->can_queue) ?
		sh->hostt->can_queue : sh->can_queue;

	atomic_set(&hba->left_queue_depth, hba_depth);
	atomic_set(&hba->max_queue_depth, hba_depth);

	phv->phv_lld_host = sh;
	phv->phv_mode = PHV_LLD_SCSI_HOST_NO;

	printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
		hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");

	return 1;
}

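/*
 * Issue MODE_SENSE to a TYPE_TAPE device to learn the current media blocksize
 * and store it in sdev->sector_size, defaulting to 1024 when it reads back 0.
 */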
static void pscsi_tape_read_blocksize(struct se_device *dev,
		struct scsi_device *sdev)
{
	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
	int ret;

	buf = kzalloc(12, GFP_KERNEL);
	if (!buf)
		return;

	memset(cdb, 0, MAX_COMMAND_SIZE);
	cdb[0] = MODE_SENSE;
	cdb[4] = 0x0c; /* 12 bytes */

	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
			HZ, 1, NULL);
	if (ret)
		goto out_free;

	/*
	 * If MODE_SENSE still returns zero, set the default value to 1024.
	 */
	sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
	if (!sdev->sector_size)
		sdev->sector_size = 1024;
out_free:
	kfree(buf);
}

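/*
 * Copy the cached standard INQUIRY vendor/model/revision strings from
 * struct scsi_device into the passed struct t10_wwn.
 */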
static void
pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
{
	unsigned char *buf;

	if (sdev->inquiry_len < INQUIRY_LEN)
		return;

	buf = sdev->inquiry;
	if (!buf)
		return;
	/*
	 * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev()
	 */
	memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor));
	memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model));
	memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision));
}

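/*
 * Issue an INQUIRY for the Unit Serial Number VPD page (0x80) and, on
 * success, record the serial in wwn->unit_serial and flag the subsystem
 * device with SDF_FIRMWARE_VPD_UNIT_SERIAL.
 */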
static int
pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
{
	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
	int ret;

	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
	if (!buf)
		return -1;

	memset(cdb, 0, MAX_COMMAND_SIZE);
	cdb[0] = INQUIRY;
	cdb[1] = 0x01; /* Query VPD */
	cdb[2] = 0x80; /* Unit Serial Number */
	cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff;
	cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff);

	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
			INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
	if (ret)
		goto out_free;

	snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
	wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL;

	kfree(buf);
	return 0;

out_free:
	kfree(buf);
	return -1;
}

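/*
 * Issue an INQUIRY for the Device Identification VPD page (0x83), walk the
 * returned identification descriptors, and register each usable descriptor
 * as a struct t10_vpd on wwn->t10_vpd_list.
 */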
static void
pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
		struct t10_wwn *wwn)
{
	unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
	int ident_len, page_len, off = 4, ret;
	struct t10_vpd *vpd;

	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
	if (!buf)
		return;

	memset(cdb, 0, MAX_COMMAND_SIZE);
	cdb[0] = INQUIRY;
	cdb[1] = 0x01; /* Query VPD */
	cdb[2] = 0x83; /* Device Identifier */
	cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff;
	cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff);

	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
			INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
			NULL, HZ, 1, NULL);
	if (ret)
		goto out;

	page_len = (buf[2] << 8) | buf[3];
	while (page_len > 0) {
		/* Grab a pointer to the Identification descriptor */
		page_83 = &buf[off];
		ident_len = page_83[3];
		if (!ident_len) {
			printk(KERN_ERR "page_83[3]: identifier"
					" length zero!\n");
			break;
		}
		printk(KERN_INFO "T10 VPD Identifer Length: %d\n", ident_len);

		vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
		if (!vpd) {
			printk(KERN_ERR "Unable to allocate memory for"
					" struct t10_vpd\n");
			goto out;
		}
		INIT_LIST_HEAD(&vpd->vpd_list);

		transport_set_vpd_proto_id(vpd, page_83);
		transport_set_vpd_assoc(vpd, page_83);

		if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
			off += (ident_len + 4);
			page_len -= (ident_len + 4);
			kfree(vpd);
			continue;
		}
		if (transport_set_vpd_ident(vpd, page_83) < 0) {
			off += (ident_len + 4);
			page_len -= (ident_len + 4);
			kfree(vpd);
			continue;
		}

		list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
		off += (ident_len + 4);
		page_len -= (ident_len + 4);
	}
out:
	kfree(buf);
}

/* pscsi_add_device_to_list():
 *
 * Register the referenced struct scsi_device as a struct se_device with
 * target_core_mod, setting up queue limits and INQUIRY/VPD information.
 */
static struct se_device *pscsi_add_device_to_list(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	struct pscsi_dev_virt *pdv,
	struct scsi_device *sd,
	int dev_flags)
{
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct request_queue *q;
	struct queue_limits *limits;

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	if (!sd->queue_depth) {
		sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;

		printk(KERN_ERR "Set broken SCSI Device %d:%d:%d"
			" queue_depth to %d\n", sd->channel, sd->id,
			sd->lun, sd->queue_depth);
	}
	/*
	 * Setup the local scope queue_limits from struct request_queue->limits
	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
	 */
	q = sd->request_queue;
	limits = &dev_limits.limits;
	limits->logical_block_size = sd->sector_size;
	limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ?
				  queue_max_hw_sectors(q) : sd->host->max_sectors;
	limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ?
				  queue_max_sectors(q) : sd->host->max_sectors;
	dev_limits.hw_queue_depth = sd->queue_depth;
	dev_limits.queue_depth = sd->queue_depth;
	/*
	 * Setup our standard INQUIRY info into se_dev->t10_wwn
	 */
	pscsi_set_inquiry_info(sd, &se_dev->t10_wwn);
	/*
	 * Set the pointer pdv->pdv_sd from the passed struct scsi_device,
	 * which has already been referenced with Linux SCSI code with
	 * scsi_device_get() in this file's pscsi_create_virtdevice().
	 *
	 * The passthrough operations called by the transport_add_device_*
	 * function below will require this pointer to be set for passthrough
	 * ops.
	 *
	 * For the shutdown case in pscsi_free_device(), this struct
	 * scsi_device reference is released with Linux SCSI code
	 * scsi_device_put() and the pdv->pdv_sd cleared.
	 */
	pdv->pdv_sd = sd;

	dev = transport_add_device_to_core_hba(hba, &pscsi_template,
				se_dev, dev_flags, (void *)pdv,
				&dev_limits, NULL, NULL);
	if (!(dev)) {
		pdv->pdv_sd = NULL;
		return NULL;
	}
	/*
	 * Locate VPD WWN Information used for various purposes within
	 * the Storage Engine.
	 */
	if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) {
		/*
		 * If VPD Unit Serial returned GOOD status, try
		 * VPD Device Identification page (0x83).
		 */
		pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn);
	}
	/*
	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
	 */
	if (sd->type == TYPE_TAPE)
		pscsi_tape_read_blocksize(dev, sd);

	return dev;
}

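/*
 * Allocate the struct pscsi_dev_virt that carries the configfs parameters
 * until pscsi_create_virtdevice() locates the backing struct scsi_device.
 */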
static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct pscsi_dev_virt *pdv;

	pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
	if (!(pdv)) {
		printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n");
		return NULL;
	}
	pdv->pdv_se_hba = hba;

	printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name);
	return (void *)pdv;
}

/*
 * Called with struct Scsi_Host->host_lock held.
 */
static struct se_device *pscsi_create_type_disk(
	struct scsi_device *sd,
	struct pscsi_dev_virt *pdv,
	struct se_subsystem_dev *se_dev,
	struct se_hba *hba)
{
	struct se_device *dev;
	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
	struct Scsi_Host *sh = sd->host;
	struct block_device *bd;
	u32 dev_flags = 0;

	if (scsi_device_get(sd)) {
		printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
			sh->host_no, sd->channel, sd->id, sd->lun);
		spin_unlock_irq(sh->host_lock);
		return NULL;
	}
	spin_unlock_irq(sh->host_lock);
	/*
	 * Claim exclusive struct block_device access to struct scsi_device
	 * for TYPE_DISK using supplied udev_path
	 */
	bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
				FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
	if (IS_ERR(bd)) {
		printk("pSCSI: blkdev_get_by_path() failed\n");
		scsi_device_put(sd);
		return NULL;
	}
	pdv->pdv_bd = bd;

	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
	if (!(dev)) {
		blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
		scsi_device_put(sd);
		return NULL;
	}
	printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
		phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);

	return dev;
}

/*
 * Called with struct Scsi_Host->host_lock held.
 */
static struct se_device *pscsi_create_type_rom(
	struct scsi_device *sd,
	struct pscsi_dev_virt *pdv,
	struct se_subsystem_dev *se_dev,
	struct se_hba *hba)
{
	struct se_device *dev;
	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
	struct Scsi_Host *sh = sd->host;
	u32 dev_flags = 0;

	if (scsi_device_get(sd)) {
		printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
			sh->host_no, sd->channel, sd->id, sd->lun);
		spin_unlock_irq(sh->host_lock);
		return NULL;
	}
	spin_unlock_irq(sh->host_lock);

	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
	if (!(dev)) {
		scsi_device_put(sd);
		return NULL;
	}
	printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
		sd->channel, sd->id, sd->lun);

	return dev;
}

/*
 * Called with struct Scsi_Host->host_lock held.
 */
static struct se_device *pscsi_create_type_other(
	struct scsi_device *sd,
	struct pscsi_dev_virt *pdv,
	struct se_subsystem_dev *se_dev,
	struct se_hba *hba)
{
	struct se_device *dev;
	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
	struct Scsi_Host *sh = sd->host;
	u32 dev_flags = 0;

	spin_unlock_irq(sh->host_lock);
	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
	if (!(dev))
		return NULL;

	printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
		sd->channel, sd->id, sd->lun);

	return dev;
}

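/*
 * Locate the struct Scsi_Host and matching struct scsi_device for the
 * configured host/channel/target/lun, then hand off to the type specific
 * pscsi_create_type_*() helper to register the struct se_device.
 */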
static struct se_device *pscsi_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p;
	struct se_device *dev;
	struct scsi_device *sd;
	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
	struct Scsi_Host *sh = phv->phv_lld_host;
	int legacy_mode_enable = 0;

	if (!(pdv)) {
		printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
			" parameter\n");
		return NULL;
	}
	/*
	 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
	 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
	 */
	if (!(sh)) {
		if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
			printk(KERN_ERR "pSCSI: Unable to locate struct"
				" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
			return NULL;
		}
		/*
		 * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device
		 * reference, we enforce that udev_path has been set
		 */
		if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
			printk(KERN_ERR "pSCSI: udev_path attribute has not"
				" been set before ENABLE=1\n");
			return NULL;
		}
		/*
		 * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID,
		 * use the original TCM hba ID to reference Linux/SCSI Host No
		 * and enable for PHV_LLD_SCSI_HOST_NO mode.
		 */
		if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
			spin_lock(&hba->device_lock);
			if (!(list_empty(&hba->hba_dev_list))) {
				printk(KERN_ERR "pSCSI: Unable to set hba_mode"
					" with active devices\n");
				spin_unlock(&hba->device_lock);
				return NULL;
			}
			spin_unlock(&hba->device_lock);

			if (pscsi_pmode_enable_hba(hba, 1) != 1)
				return NULL;

			legacy_mode_enable = 1;
			hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
			sh = phv->phv_lld_host;
		} else {
			sh = pscsi_get_sh(pdv->pdv_host_id);
			if (!(sh)) {
				printk(KERN_ERR "pSCSI: Unable to locate"
					" pdv_host_id: %d\n", pdv->pdv_host_id);
				return NULL;
			}
		}
	} else {
		if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
			printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
				" struct Scsi_Host exists\n");
			return NULL;
		}
	}

	spin_lock_irq(sh->host_lock);
	list_for_each_entry(sd, &sh->__devices, siblings) {
		if ((pdv->pdv_channel_id != sd->channel) ||
		    (pdv->pdv_target_id != sd->id) ||
		    (pdv->pdv_lun_id != sd->lun))
			continue;
		/*
		 * Functions will release the held struct scsi_host->host_lock
		 * before calling pscsi_add_device_to_list() to register
		 * struct scsi_device with target_core_mod.
		 */
		switch (sd->type) {
		case TYPE_DISK:
			dev = pscsi_create_type_disk(sd, pdv, se_dev, hba);
			break;
		case TYPE_ROM:
			dev = pscsi_create_type_rom(sd, pdv, se_dev, hba);
			break;
		default:
			dev = pscsi_create_type_other(sd, pdv, se_dev, hba);
			break;
		}

		if (!(dev)) {
			if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
				scsi_host_put(sh);
			else if (legacy_mode_enable) {
				pscsi_pmode_enable_hba(hba, 0);
				hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
			}
			pdv->pdv_sd = NULL;
			return NULL;
		}
		return dev;
	}
	spin_unlock_irq(sh->host_lock);

	printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
		pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);

	if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
		scsi_host_put(sh);
	else if (legacy_mode_enable) {
		pscsi_pmode_enable_hba(hba, 0);
		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
	}

	return NULL;
}

/* pscsi_free_device(): (Part of se_subsystem_api_t template)
 *
 *
 */
static void pscsi_free_device(void *p)
{
	struct pscsi_dev_virt *pdv = p;
	struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
	struct scsi_device *sd = pdv->pdv_sd;

	if (sd) {
		/*
		 * Release exclusive pSCSI internal struct block_device claim for
		 * struct scsi_device with TYPE_DISK from pscsi_create_type_disk()
		 */
		if ((sd->type == TYPE_DISK) && pdv->pdv_bd) {
			blkdev_put(pdv->pdv_bd,
				   FMODE_WRITE|FMODE_READ|FMODE_EXCL);
			pdv->pdv_bd = NULL;
		}
		/*
		 * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
		 * to struct Scsi_Host now.
		 */
		if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
		    (phv->phv_lld_host != NULL))
			scsi_host_put(phv->phv_lld_host);

		if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
			scsi_device_put(sd);

		pdv->pdv_sd = NULL;
	}
	kfree(pdv);
}

static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
{
	return container_of(task, struct pscsi_plugin_task, pscsi_task);
}

/* pscsi_transport_complete():
 *
 * Post-process a completed struct request: propagate the Write-Protect bit
 * for MODE_SENSE on read-only LUNs, and pick up the blocksize negotiated by
 * MODE_SELECT for TYPE_TAPE devices.
 */
static int pscsi_transport_complete(struct se_task *task)
{
	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
	struct scsi_device *sd = pdv->pdv_sd;
	int result;
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
	unsigned char *cdb = &pt->pscsi_cdb[0];

	result = pt->pscsi_result;
	/*
	 * Hack to make sure that Write-Protect modepage is set if R/O mode is
	 * forced.
	 */
	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
	     (status_byte(result) << 1) == SAM_STAT_GOOD) {
		if (!TASK_CMD(task)->se_deve)
			goto after_mode_sense;

		if (TASK_CMD(task)->se_deve->lun_flags &
				TRANSPORT_LUNFLAGS_READ_ONLY) {
			unsigned char *buf = (unsigned char *)
				T_TASK(task->task_se_cmd)->t_task_buf;

			if (cdb[0] == MODE_SENSE_10) {
				if (!(buf[3] & 0x80))
					buf[3] |= 0x80;
			} else {
				if (!(buf[2] & 0x80))
					buf[2] |= 0x80;
			}
		}
	}
after_mode_sense:

	if (sd->type != TYPE_TAPE)
		goto after_mode_select;

	/*
	 * Hack to correctly obtain the initiator requested blocksize for
	 * TYPE_TAPE.  Since this value is dependent upon each tape media,
	 * struct scsi_device->sector_size will not contain the correct value
	 * by default, so we go ahead and set it so
	 * TRANSPORT(dev)->get_blockdev() returns the correct value to the
	 * storage engine.
	 */
	if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
	      (status_byte(result) << 1) == SAM_STAT_GOOD) {
		unsigned char *buf;
		struct scatterlist *sg = task->task_sg;
		u16 bdl;
		u32 blocksize;

		buf = sg_virt(&sg[0]);
		if (!(buf)) {
			printk(KERN_ERR "Unable to get buf for scatterlist\n");
			goto after_mode_select;
		}

		if (cdb[0] == MODE_SELECT)
			bdl = (buf[3]);
		else
			bdl = (buf[6] << 8) | (buf[7]);

		if (!bdl)
			goto after_mode_select;

		if (cdb[0] == MODE_SELECT)
			blocksize = (buf[9] << 16) | (buf[10] << 8) |
					(buf[11]);
		else
			blocksize = (buf[13] << 16) | (buf[14] << 8) |
					(buf[15]);

		sd->sector_size = blocksize;
	}
after_mode_select:

	if (status_byte(result) & CHECK_CONDITION)
		return 1;

	return 0;
}

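/*
 * Allocate a struct pscsi_plugin_task, using the inline CDB buffer unless
 * TCM core has already allocated an extended CDB for this struct se_cmd.
 */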
static struct se_task *
pscsi_alloc_task(struct se_cmd *cmd)
{
	struct pscsi_plugin_task *pt;
	unsigned char *cdb = T_TASK(cmd)->t_task_cdb;

	pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
	if (!pt) {
		printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n");
		return NULL;
	}
	/*
	 * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation,
	 * allocate the extended CDB buffer for per struct se_task context
	 * pt->pscsi_cdb now.
	 */
	if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) {
		pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
		if (!(pt->pscsi_cdb)) {
			printk(KERN_ERR "pSCSI: Unable to allocate extended"
					" pt->pscsi_cdb\n");
			kfree(pt);
			return NULL;
		}
	} else
		pt->pscsi_cdb = &pt->__pscsi_cdb[0];

	return &pt->pscsi_task;
}

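/*
 * Common setup for a REQ_TYPE_BLOCK_PC struct request: completion callback,
 * CDB and sense buffer.  The extra BIDI READ request only needs cmd_type.
 */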
static inline void pscsi_blk_init_request(
	struct se_task *task,
	struct pscsi_plugin_task *pt,
	struct request *req,
	int bidi_read)
{
	/*
	 * Defined as "scsi command" in include/linux/blkdev.h.
	 */
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	/*
	 * For the extra BIDI-COMMAND READ struct request we do not
	 * need to setup the remaining structure members
	 */
	if (bidi_read)
		return;
	/*
	 * Setup the done function pointer for struct request,
	 * also set the end_io_data pointer to struct se_task.
	 */
	req->end_io = pscsi_req_done;
	req->end_io_data = (void *)task;
	/*
	 * Load the referenced struct se_task's SCSI CDB into
	 * include/linux/blkdev.h:struct request->cmd
	 */
	req->cmd_len = scsi_command_size(pt->pscsi_cdb);
	req->cmd = &pt->pscsi_cdb[0];
	/*
	 * Setup pointer for outgoing sense data.
	 */
	req->sense = (void *)&pt->pscsi_sense[0];
	req->sense_len = 0;
}

/*
 * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB
 */
static int pscsi_blk_get_request(struct se_task *task)
{
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;

	pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
			(task->task_data_direction == DMA_TO_DEVICE),
			GFP_KERNEL);
	if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) {
		printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n",
				IS_ERR(pt->pscsi_req));
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}
	/*
	 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
	 * and setup rq callback, CDB and sense.
	 */
	pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);

	return 0;
}

/* pscsi_do_task(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int pscsi_do_task(struct se_task *task)
{
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
	/*
	 * Set the struct request->timeout value based on peripheral
	 * device type from SCSI.
	 */
	if (pdv->pdv_sd->type == TYPE_DISK)
		pt->pscsi_req->timeout = PS_TIMEOUT_DISK;
	else
		pt->pscsi_req->timeout = PS_TIMEOUT_OTHER;

	pt->pscsi_req->retries = PS_RETRY;
	/*
	 * Queue the struct request into the struct scsi_device->request_queue.
	 * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd
	 * descriptor
	 */
	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
			(task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ),
			pscsi_req_done);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

static void pscsi_free_task(struct se_task *task)
{
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
	struct se_cmd *cmd = task->task_se_cmd;
	/*
	 * Release the extended CDB allocation from pscsi_alloc_task()
	 * if one exists.
	 */
	if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb)
		kfree(pt->pscsi_cdb);
	/*
	 * We do not release the bio(s) here associated with this task, as
	 * this is handled by bio_put() and pscsi_bi_endio().
	 */
	kfree(pt);
}

enum {
	Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
	Opt_scsi_lun_id, Opt_err
};

static match_table_t tokens = {
	{Opt_scsi_host_id, "scsi_host_id=%d"},
	{Opt_scsi_channel_id, "scsi_channel_id=%d"},
	{Opt_scsi_target_id, "scsi_target_id=%d"},
	{Opt_scsi_lun_id, "scsi_lun_id=%d"},
	{Opt_err, NULL}
};

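/*
 * Parse the comma separated configfs control string (scsi_host_id=,
 * scsi_channel_id=, scsi_target_id=, scsi_lun_id=) into struct pscsi_dev_virt.
 */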
static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page,
	ssize_t count)
{
	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
	struct pscsi_hba_virt *phv = hba->hba_ptr;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_scsi_host_id:
			if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
				printk(KERN_ERR "PSCSI[%d]: Unable to accept"
					" scsi_host_id while phv_mode =="
					" PHV_LLD_SCSI_HOST_NO\n",
					phv->phv_host_id);
				ret = -EINVAL;
				goto out;
			}
			match_int(args, &arg);
			pdv->pdv_host_id = arg;
			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:"
				" %d\n", phv->phv_host_id, pdv->pdv_host_id);
			pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
			break;
		case Opt_scsi_channel_id:
			match_int(args, &arg);
			pdv->pdv_channel_id = arg;
			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel"
				" ID: %d\n", phv->phv_host_id,
				pdv->pdv_channel_id);
			pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
			break;
		case Opt_scsi_target_id:
			match_int(args, &arg);
			pdv->pdv_target_id = arg;
			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target"
				" ID: %d\n", phv->phv_host_id,
				pdv->pdv_target_id);
			pdv->pdv_flags |= PDF_HAS_TARGET_ID;
			break;
		case Opt_scsi_lun_id:
			match_int(args, &arg);
			pdv->pdv_lun_id = arg;
			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:"
				" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
			pdv->pdv_flags |= PDF_HAS_LUN_ID;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t pscsi_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
{
	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;

	if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
	    !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
	    !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
		printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
			" scsi_lun_id= parameters\n");
		return -1;
	}

	return 0;
}

static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct pscsi_hba_virt *phv = hba->hba_ptr;
	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
	struct scsi_device *sd = pdv->pdv_sd;
	unsigned char host_id[16];
	ssize_t bl;
	int i;

	if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
		snprintf(host_id, 16, "%d", pdv->pdv_host_id);
	else
		snprintf(host_id, 16, "PHBA Mode");

	bl = sprintf(b, "SCSI Device Bus Location:"
		" Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
		pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
		host_id);

	if (sd) {
		bl += sprintf(b + bl, " ");
		bl += sprintf(b + bl, "Vendor: ");
		for (i = 0; i < 8; i++) {
			if (ISPRINT(sd->vendor[i]))   /* printable character? */
				bl += sprintf(b + bl, "%c", sd->vendor[i]);
			else
				bl += sprintf(b + bl, " ");
		}
		bl += sprintf(b + bl, " Model: ");
		for (i = 0; i < 16; i++) {
			if (ISPRINT(sd->model[i]))   /* printable character? */
				bl += sprintf(b + bl, "%c", sd->model[i]);
			else
				bl += sprintf(b + bl, " ");
		}
		bl += sprintf(b + bl, " Rev: ");
		for (i = 0; i < 4; i++) {
			if (ISPRINT(sd->rev[i]))   /* printable character? */
				bl += sprintf(b + bl, "%c", sd->rev[i]);
			else
				bl += sprintf(b + bl, " ");
		}
		bl += sprintf(b + bl, "\n");
	}
	return bl;
}

static void pscsi_bi_endio(struct bio *bio, int error)
{
	bio_put(bio);
}

static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
{
	struct bio *bio;
	/*
	 * Use bio_kmalloc() following the comment for bio -> struct request
	 * in block/blk-core.c:blk_make_request()
	 */
	bio = bio_kmalloc(GFP_KERNEL, sg_num);
	if (!(bio)) {
		printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n");
		return NULL;
	}
	bio->bi_end_io = pscsi_bi_endio;

	return bio;
}

#if 0
#define DEBUG_PSCSI(x...) printk(x)
#else
#define DEBUG_PSCSI(x...)
#endif

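/*
 * Map the struct se_task scatterlist into one or more chained bios with
 * bio_add_pc_page(), then build the struct request (or the BIDI next_rq)
 * with blk_make_request().
 */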
static int __pscsi_map_task_SG(
	struct se_task *task,
	struct scatterlist *task_sg,
	u32 task_sg_num,
	int bidi_read)
{
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
	struct page *page;
	struct scatterlist *sg;
	u32 data_len = task->task_size, i, len, bytes, off;
	int nr_pages = (task->task_size + task_sg[0].offset +
			PAGE_SIZE - 1) >> PAGE_SHIFT;
	int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
	int rw = (task->task_data_direction == DMA_TO_DEVICE);

	if (!task->task_size)
		return 0;
	/*
	 * For SCF_SCSI_DATA_SG_IO_CDB, use fs/bio.c:bio_add_page() to setup
	 * the bio_vec maplist from TCM struct se_mem -> task->task_sg ->
	 * struct scatterlist memory.  The struct se_task->task_sg[] currently
	 * needs to be attached to struct bios for submission to Linux/SCSI
	 * using struct request to struct scsi_device->request_queue.
	 *
	 * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI
	 * is ported to upstream SCSI passthrough functionality that accepts
	 * struct scatterlist->page_link or struct page as a parameter.
	 */
	DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages);

	for_each_sg(task_sg, sg, task_sg_num, i) {
		page = sg_page(sg);
		off = sg->offset;
		len = sg->length;

		DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i,
			page, len, off);

		while (len > 0 && data_len > 0) {
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
			bytes = min(bytes, data_len);

			if (!(bio)) {
				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
				nr_pages -= nr_vecs;
				/*
				 * Calls bio_kmalloc() and sets bio->bi_end_io()
				 */
				bio = pscsi_get_bio(pdv, nr_vecs);
				if (!(bio))
					goto fail;

				if (rw)
					bio->bi_rw |= REQ_WRITE;

				DEBUG_PSCSI("PSCSI: Allocated bio: %p,"
					" dir: %s nr_vecs: %d\n", bio,
					(rw) ? "rw" : "r", nr_vecs);
				/*
				 * Set *hbio pointer to handle the case:
				 * nr_pages > BIO_MAX_PAGES, where additional
				 * bios need to be added to complete a given
				 * struct se_task
				 */
				if (!hbio)
					hbio = tbio = bio;
				else
					tbio = tbio->bi_next = bio;
			}

			DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d"
				" bio: %p page: %p len: %d off: %d\n", i, bio,
				page, len, off);

			rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
					bio, page, bytes, off);
			if (rc != bytes)
				goto fail;

			DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
				bio->bi_vcnt, nr_vecs);

			if (bio->bi_vcnt > nr_vecs) {
				DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:"
					" %d i: %d bio: %p, allocating another"
					" bio\n", bio->bi_vcnt, i, bio);
				/*
				 * Clear the pointer so that another bio will
				 * be allocated with pscsi_get_bio() above, the
				 * current bio has already been set *tbio and
				 * bio->bi_next.
				 */
				bio = NULL;
			}

			page++;
			len -= bytes;
			data_len -= bytes;
			off = 0;
		}
	}
	/*
	 * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
	 * primary SCSI WRITE payload mapped for struct se_task->task_sg[]
	 */
	if (!(bidi_read)) {
		/*
		 * Starting with v2.6.31, call blk_make_request() passing in *hbio to
		 * allocate the pSCSI task a struct request.
		 */
		pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
					hbio, GFP_KERNEL);
		if (!(pt->pscsi_req)) {
			printk(KERN_ERR "pSCSI: blk_make_request() failed\n");
			goto fail;
		}
		/*
		 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
		 * and setup rq callback, CDB and sense.
		 */
		pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);

		return task->task_sg_num;
	}
	/*
	 * Setup the secondary pt->pscsi_req->next_rq used for the extra
	 * BIDI-COMMAND SCSI READ payload mapped for struct se_task->task_sg_bidi[]
	 */
	pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
					hbio, GFP_KERNEL);
	if (!(pt->pscsi_req->next_rq)) {
		printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n");
		goto fail;
	}
	pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);

	return task->task_sg_num;

fail:
	while (hbio) {
		bio = hbio;
		hbio = hbio->bi_next;
		bio->bi_next = NULL;
		bio_endio(bio, 0);
	}
	return ret;
}

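/*
 * Map the main task->task_sg[] payload and, when present, the extra
 * BIDI-COMMAND task->task_sg_bidi[] payload onto struct requests.
 */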
static int pscsi_map_task_SG(struct se_task *task)
{
	int ret;

	/*
	 * Setup the main struct request for the task->task_sg[] payload
	 */
	ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0);
	if (ret >= 0 && task->task_sg_bidi) {
		/*
		 * If present, set up the extra BIDI-COMMAND SCSI READ
		 * struct request and payload.
		 */
		ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
					task->task_sg_num, 1);
	}

	if (ret < 0)
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	return 0;
}

/* pscsi_map_task_non_SG():
 *
 * Map the contiguous T_TASK(cmd)->t_task_buf kernel buffer into the
 * struct request with blk_rq_map_kern() for non scatterlist payloads.
 */
static int pscsi_map_task_non_SG(struct se_task *task)
{
	struct se_cmd *cmd = TASK_CMD(task);
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
	int ret = 0;

	if (pscsi_blk_get_request(task) < 0)
		return PYX_TRANSPORT_LU_COMM_FAILURE;

	if (!task->task_size)
		return 0;

	ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
			pt->pscsi_req, T_TASK(cmd)->t_task_buf,
			task->task_size, GFP_KERNEL);
	if (ret < 0) {
		printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}
	return 0;
}

static int pscsi_CDB_none(struct se_task *task)
{
	return pscsi_blk_get_request(task);
}

/* pscsi_get_cdb():
 *
 *
 */
static unsigned char *pscsi_get_cdb(struct se_task *task)
{
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);

	return pt->pscsi_cdb;
}

/* pscsi_get_sense_buffer():
 *
 *
 */
static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
{
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);

	return (unsigned char *)&pt->pscsi_sense[0];
}

/* pscsi_get_device_rev():
 *
 *
 */
static u32 pscsi_get_device_rev(struct se_device *dev)
{
	struct pscsi_dev_virt *pdv = dev->dev_ptr;
	struct scsi_device *sd = pdv->pdv_sd;

	return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1;
}

/* pscsi_get_device_type():
 *
 *
 */
static u32 pscsi_get_device_type(struct se_device *dev)
{
	struct pscsi_dev_virt *pdv = dev->dev_ptr;
	struct scsi_device *sd = pdv->pdv_sd;

	return sd->type;
}

static sector_t pscsi_get_blocks(struct se_device *dev)
{
	struct pscsi_dev_virt *pdv = dev->dev_ptr;

	if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
		return pdv->pdv_bd->bd_part->nr_sects;

	dump_stack();
	return 0;
}

/* pscsi_process_SAM_status():
 *
 *
 */
static inline void pscsi_process_SAM_status(
	struct se_task *task,
	struct pscsi_plugin_task *pt)
{
	task->task_scsi_status = status_byte(pt->pscsi_result);
	if ((task->task_scsi_status)) {
		task->task_scsi_status <<= 1;
		printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:"
			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
			pt->pscsi_result);
	}

	switch (host_byte(pt->pscsi_result)) {
	case DID_OK:
		transport_complete_task(task, (!task->task_scsi_status));
		break;
	default:
		printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:"
			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
			pt->pscsi_result);
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		TASK_CMD(task)->transport_error_status =
					PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		transport_complete_task(task, 0);
		break;
	}

	return;
}

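/*
 * struct request end_io callback: record result/residual, complete the
 * struct se_task, and drop the request (and BIDI next_rq) references.
 */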
static void pscsi_req_done(struct request *req, int uptodate)
{
	struct se_task *task = req->end_io_data;
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);

	pt->pscsi_result = req->errors;
	pt->pscsi_resid = req->resid_len;

	pscsi_process_SAM_status(task, pt);
	/*
	 * Release BIDI-READ if present
	 */
	if (req->next_rq != NULL)
		__blk_put_request(req->q, req->next_rq);

	__blk_put_request(req->q, req);
	pt->pscsi_req = NULL;
}

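/*
 * se_subsystem_api method table hooking the pSCSI passthrough backend into
 * the generic target core.
 */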
static struct se_subsystem_api pscsi_template = {
	.name			= "pscsi",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_PHBA_PDEV,
	.cdb_none		= pscsi_CDB_none,
	.map_task_non_SG	= pscsi_map_task_non_SG,
	.map_task_SG		= pscsi_map_task_SG,
	.attach_hba		= pscsi_attach_hba,
	.detach_hba		= pscsi_detach_hba,
	.pmode_enable_hba	= pscsi_pmode_enable_hba,
	.allocate_virtdevice	= pscsi_allocate_virtdevice,
	.create_virtdevice	= pscsi_create_virtdevice,
	.free_device		= pscsi_free_device,
	.transport_complete	= pscsi_transport_complete,
	.alloc_task		= pscsi_alloc_task,
	.do_task		= pscsi_do_task,
	.free_task		= pscsi_free_task,
	.check_configfs_dev_params = pscsi_check_configfs_dev_params,
	.set_configfs_dev_params = pscsi_set_configfs_dev_params,
	.show_configfs_dev_params = pscsi_show_configfs_dev_params,
	.get_cdb		= pscsi_get_cdb,
	.get_sense_buffer	= pscsi_get_sense_buffer,
	.get_device_rev		= pscsi_get_device_rev,
	.get_device_type	= pscsi_get_device_type,
	.get_blocks		= pscsi_get_blocks,
};

static int __init pscsi_module_init(void)
{
	return transport_subsystem_register(&pscsi_template);
}

static void pscsi_module_exit(void)
{
	transport_subsystem_release(&pscsi_template);
}

MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(pscsi_module_init);
module_exit(pscsi_module_exit);