target_core_file.c

/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * Copyright (c) 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/version.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>

#include "target_core_file.h"

#if 1
#define DEBUG_FD_CACHE(x...) printk(x)
#else
#define DEBUG_FD_CACHE(x...)
#endif

#if 1
#define DEBUG_FD_FUA(x...) printk(x)
#else
#define DEBUG_FD_FUA(x...)
#endif
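
/*
 * Note: with "#if 1" above, both DEBUG_FD_* macro families expand to
 * unconditional printk() calls; switching either block to "#if 0"
 * compiles the corresponding call sites out entirely.
 */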

static struct se_subsystem_api fileio_template;

/*	fd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!(fd_host)) {
		printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

	hba->hba_ptr = fd_host;

	printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_MOD_VERSION);
	printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
		" MaxSectors: %u\n",
		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);

	return 0;
}

static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}

static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!(fd_dev)) {
		printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	printk(KERN_INFO "FILEIO: Allocated fd_dev for %s\n", name);

	return fd_dev;
}

/*	fd_create_virtdevice(): (Part of se_subsystem_api_t template)
 *
 *
 */
static struct se_device *fd_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	char *dev_p = NULL;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct queue_limits *limits;
	struct fd_dev *fd_dev = (struct fd_dev *) p;
	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
	mm_segment_t old_fs;
	struct file *file;
	struct inode *inode = NULL;
	int dev_flags = 0, flags, ret = -EINVAL;

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
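
	/*
	 * getname() normally copies a pathname in from userspace and
	 * checks it against the current address limit. Temporarily
	 * widening the limit with set_fs(get_ds()) lets it accept the
	 * kernel-resident fd_dev->fd_dev_name buffer; the previous
	 * limit is restored immediately afterwards.
	 */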
	old_fs = get_fs();
	set_fs(get_ds());
	dev_p = getname(fd_dev->fd_dev_name);
	set_fs(old_fs);

	if (IS_ERR(dev_p)) {
		printk(KERN_ERR "getname(%s) failed: %ld\n",
			fd_dev->fd_dev_name, PTR_ERR(dev_p));
		ret = PTR_ERR(dev_p);
		goto fail;
	}
#if 0
	if (di->no_create_file)
		flags = O_RDWR | O_LARGEFILE;
	else
		flags = O_RDWR | O_CREAT | O_LARGEFILE;
#else
	flags = O_RDWR | O_CREAT | O_LARGEFILE;
#endif
/*	flags |= O_DIRECT; */
	/*
	 * If fd_buffered_io=1 has not been set explicitly (the default),
	 * use O_SYNC to force FILEIO writes to disk.
	 */
	if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
		flags |= O_SYNC;

	file = filp_open(dev_p, flags, 0600);
	if (IS_ERR(file)) {
		printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
		ret = PTR_ERR(file);
		goto fail;
	}
	if (!file || !file->f_dentry) {
		printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
		goto fail;
	}
	fd_dev->fd_file = file;
	/*
	 * If using a block backend with this struct file, we extract
	 * fd_dev->fd_[block,dev]_size from struct block_device.
	 *
	 * Otherwise, we use the passed fd_size= from configfs
	 */
	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct request_queue *q;
		/*
		 * Setup the local scope queue_limits from struct request_queue->limits
		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
		 */
		q = bdev_get_queue(inode->i_bdev);
		limits = &dev_limits.limits;
		limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
		limits->max_hw_sectors = queue_max_hw_sectors(q);
		limits->max_sectors = queue_max_sectors(q);
		/*
		 * Determine the number of bytes from i_size_read() minus
		 * one (1) logical sector from underlying struct block_device
		 */
		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
		fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
				       fd_dev->fd_block_size);

		printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
			" block_device blocks: %llu logical_block_size: %d\n",
			fd_dev->fd_dev_size,
			div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
			fd_dev->fd_block_size);
	} else {
		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
			printk(KERN_ERR "FILEIO: Missing fd_dev_size="
				" parameter, and no backing struct"
				" block_device\n");
			goto fail;
		}

		limits = &dev_limits.limits;
		limits->logical_block_size = FD_BLOCKSIZE;
		limits->max_hw_sectors = FD_MAX_SECTORS;
		limits->max_sectors = FD_MAX_SECTORS;
		fd_dev->fd_block_size = FD_BLOCKSIZE;
	}

	dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba, &fileio_template,
				se_dev, dev_flags, fd_dev,
				&dev_limits, "FILEIO", FD_VERSION);
	if (!(dev))
		goto fail;

	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
	fd_dev->fd_queue_depth = dev->queue_depth;

	printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
			fd_dev->fd_dev_name, fd_dev->fd_dev_size);

	putname(dev_p);
	return dev;
fail:
	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	putname(dev_p);
	return ERR_PTR(ret);
}

/*	fd_free_device(): (Part of se_subsystem_api_t template)
 *
 *
 */
static void fd_free_device(void *p)
{
	struct fd_dev *fd_dev = (struct fd_dev *) p;

	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}

	kfree(fd_dev);
}

static inline struct fd_request *FILE_REQ(struct se_task *task)
{
	return container_of(task, struct fd_request, fd_task);
}

static struct se_task *
fd_alloc_task(struct se_cmd *cmd)
{
	struct fd_request *fd_req;

	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
	if (!(fd_req)) {
		printk(KERN_ERR "Unable to allocate struct fd_request\n");
		return NULL;
	}

	fd_req->fd_dev = cmd->se_dev->dev_ptr;

	return &fd_req->fd_task;
}
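
/*
 * fd_do_readv() flattens the task's scatterlist into a kernel iovec
 * array and issues a single vfs_readv() at the byte offset derived
 * from task_lba * block_size. By this file's convention, returning 1
 * means the task completed synchronously (fd_do_task() then sets GOOD
 * status), and a negative value is an errno-style failure.
 */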
static int fd_do_readv(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);
	struct file *fd = req->fd_dev->fd_file;
	struct scatterlist *sg = task->task_sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (task->task_lba *
		      task->se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret = 0, i;

	iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
	if (!(iov)) {
		printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
		return -ENOMEM;
	}

	for (i = 0; i < task->task_sg_num; i++) {
		iov[i].iov_len = sg[i].length;
		iov[i].iov_base = sg_virt(&sg[i]);
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
	set_fs(old_fs);

	kfree(iov);
	/*
	 * Return zeros and GOOD status even if the READ did not return
	 * the expected virt_size for struct file w/o a backing struct
	 * block_device.
	 */
	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
		if (ret < 0 || ret != task->task_size) {
			printk(KERN_ERR "vfs_readv() returned %d,"
				" expecting %d for S_ISBLK\n", ret,
				(int)task->task_size);
			return (ret < 0 ? ret : -EINVAL);
		}
	} else {
		if (ret < 0) {
			printk(KERN_ERR "vfs_readv() returned %d for non"
				" S_ISBLK\n", ret);
			return ret;
		}
	}

	return 1;
}

static int fd_do_writev(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);
	struct file *fd = req->fd_dev->fd_file;
	struct scatterlist *sg = task->task_sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (task->task_lba *
		      task->se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret, i = 0;

	iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
	if (!(iov)) {
		printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
		return -ENOMEM;
	}

	for (i = 0; i < task->task_sg_num; i++) {
		iov[i].iov_len = sg[i].length;
		iov[i].iov_base = sg_virt(&sg[i]);
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
	set_fs(old_fs);

	kfree(iov);

	if (ret < 0 || ret != task->task_size) {
		printk(KERN_ERR "vfs_writev() returned %d\n", ret);
		return (ret < 0 ? ret : -EINVAL);
	}

	return 1;
}
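
/*
 * SYNCHRONIZE_CACHE emulation. Per SBC, when the Immediate (IMMED)
 * bit in CDB byte 1 is set, status is returned to the initiator
 * before the flush actually completes; otherwise completion is
 * reported only once vfs_fsync_range() has finished.
 */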
static void fd_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = dev->dev_ptr;
	int immed = (cmd->t_task.t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		transport_complete_sync_cache(cmd, 1);

	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task.t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task.t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length;
		else
			end = LLONG_MAX;
	}

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);

	if (!immed)
		transport_complete_sync_cache(cmd, ret == 0);
}

/*
 * Tell TCM Core that we are capable of WriteCache emulation for
 * an underlying struct se_device.
 */
static int fd_emulated_write_cache(struct se_device *dev)
{
	return 1;
}

static int fd_emulated_dpo(struct se_device *dev)
{
	return 0;
}

/*
 * Tell TCM Core that we will be emulating Force Unit Access (FUA) for WRITEs
 * for TYPE_DISK.
 */
static int fd_emulated_fua_write(struct se_device *dev)
{
	return 1;
}

static int fd_emulated_fua_read(struct se_device *dev)
{
	return 0;
}

/*
 * WRITE Force Unit Access (FUA) emulation on a per struct se_task
 * LBA range basis.
 */
static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = dev->dev_ptr;
	loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
	loff_t end = start + task->task_size;
	int ret;

	DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
		task->task_lba, task->task_size);

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
}

static int fd_do_task(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	int ret = 0;

	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (task->task_data_direction == DMA_FROM_DEVICE) {
		ret = fd_do_readv(task);
	} else {
		ret = fd_do_writev(task);

		if (ret > 0 &&
		    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
		    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		    cmd->t_task.t_tasks_fua) {
			/*
			 * We might need to be a bit smarter here
			 * and return some sense data to let the initiator
			 * know the FUA WRITE cache sync failed..?
			 */
			fd_emulate_write_fua(cmd, task);
		}
	}

	if (ret < 0)
		return ret;
	if (ret) {
		task->task_scsi_status = GOOD;
		transport_complete_task(task, 1);
	}
	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

/*	fd_free_task(): (Part of se_subsystem_api_t template)
 *
 *
 */
static void fd_free_task(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);

	kfree(req);
}

enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
	{Opt_err, NULL}
};
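
/*
 * The token table above parses the comma-separated option string
 * written to a FILEIO device's configfs control attribute. As a
 * sketch (the exact configfs path and names below are illustrative,
 * depending on the local setup), a 10 MiB buffered-I/O backstore
 * might be configured with:
 *
 *   echo "fd_dev_name=/tmp/fileio0.img,fd_dev_size=10485760,fd_buffered_io=1" \
 *       > /sys/kernel/config/target/core/fileio_0/fileio0/control
 */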
static ssize_t fd_set_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
					"%s", arg_p);
			kfree(arg_p);
			printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
					fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				printk(KERN_ERR "strict_strtoull() failed for"
						" fd_dev_size=\n");
				goto out;
			}
			printk(KERN_INFO "FILEIO: Referencing Size: %llu"
					" bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		case Opt_fd_buffered_io:
			match_int(args, &arg);
			if (arg != 1) {
				printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}

			printk(KERN_INFO "FILEIO: Using buffered I/O"
				" operations for struct fd_dev\n");

			fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t fd_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
{
	struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;

	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		printk(KERN_ERR "Missing fd_dev_name=\n");
		return -EINVAL;
	}

	return 0;
}

static ssize_t fd_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	ssize_t bl = 0;

	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
		(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
		"Buffered" : "Synchronous");
	return bl;
}

/*	fd_get_cdb(): (Part of se_subsystem_api_t template)
 *
 *
 */
static unsigned char *fd_get_cdb(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);

	return req->fd_scsi_cdb;
}

/*	fd_get_device_rev(): (Part of se_subsystem_api_t template)
 *
 *
 */
static u32 fd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

/*	fd_get_device_type(): (Part of se_subsystem_api_t template)
 *
 *
 */
static u32 fd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
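
/*
 * fd_get_blocks() reports capacity in logical blocks. For block-device
 * backends, fd_dev_size was already reduced by one logical sector in
 * fd_create_virtdevice(), which suggests the quotient is intended to
 * serve as the last addressable LBA for READ_CAPACITY-style reporting
 * (an inference from the size math above, not stated in this file).
 */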
static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = dev->dev_ptr;
	unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
			dev->se_sub_dev->se_dev_attrib.block_size);

	return blocks_long;
}

static struct se_subsystem_api fileio_template = {
	.name			= "fileio",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba		= fd_attach_hba,
	.detach_hba		= fd_detach_hba,
	.allocate_virtdevice	= fd_allocate_virtdevice,
	.create_virtdevice	= fd_create_virtdevice,
	.free_device		= fd_free_device,
	.dpo_emulated		= fd_emulated_dpo,
	.fua_write_emulated	= fd_emulated_fua_write,
	.fua_read_emulated	= fd_emulated_fua_read,
	.write_cache_emulated	= fd_emulated_write_cache,
	.alloc_task		= fd_alloc_task,
	.do_task		= fd_do_task,
	.do_sync_cache		= fd_emulate_sync_cache,
	.free_task		= fd_free_task,
	.check_configfs_dev_params = fd_check_configfs_dev_params,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_cdb		= fd_get_cdb,
	.get_device_rev		= fd_get_device_rev,
	.get_device_type	= fd_get_device_type,
	.get_blocks		= fd_get_blocks,
};

static int __init fileio_module_init(void)
{
	return transport_subsystem_register(&fileio_template);
}

static void fileio_module_exit(void)
{
	transport_subsystem_release(&fileio_template);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);
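
/*
 * On load, fileio_module_init() registers the fileio_template above
 * with the generic target core via transport_subsystem_register();
 * the core then invokes the template callbacks (attach_hba,
 * create_virtdevice, do_task, ...) as fabric modules configure and
 * drive FILEIO backstores. The transport_subsystem_release() call on
 * module exit unhooks the template again.
 */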