/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_rd.h"

static struct se_subsystem_api rd_dr_template;
static struct se_subsystem_api rd_mcp_template;

/* #define DEBUG_RAMDISK_MCP */
/* #define DEBUG_RAMDISK_DR */
/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *	Allocate and initialize an rd_host for this HBA, and publish the
 *	HBA queue depth to the generic target core.
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
        struct rd_host *rd_host;

        rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
        if (!rd_host) {
                printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
                return -ENOMEM;
        }

        rd_host->rd_host_id = host_id;

        atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
        atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
        hba->hba_ptr = rd_host;

        printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
        printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
                " Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
                rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
                RD_MAX_SECTORS);

        return 0;
}
static void rd_detach_hba(struct se_hba *hba)
{
        struct rd_host *rd_host = hba->hba_ptr;

        printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
                " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

        kfree(rd_host);
        hba->hba_ptr = NULL;
}
/*	rd_release_device_space():
 *
 *	Free every backing page referenced by the device's scatterlist
 *	tables, then free the tables themselves.
 */
static void rd_release_device_space(struct rd_dev *rd_dev)
{
        u32 i, j, page_count = 0, sg_per_table;
        struct rd_dev_sg_table *sg_table;
        struct page *pg;
        struct scatterlist *sg;

        if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
                return;

        sg_table = rd_dev->sg_table_array;

        for (i = 0; i < rd_dev->sg_table_count; i++) {
                sg = sg_table[i].sg_table;
                sg_per_table = sg_table[i].rd_sg_count;

                for (j = 0; j < sg_per_table; j++) {
                        pg = sg_page(&sg[j]);
                        if (pg) {
                                __free_page(pg);
                                page_count++;
                        }
                }

                kfree(sg);
        }

        printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
                " Device ID: %u, pages %u in %u tables total bytes %lu\n",
                rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
                rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

        kfree(sg_table);
        rd_dev->sg_table_array = NULL;
        rd_dev->sg_table_count = 0;
}
/*	rd_build_device_space():
 *
 *	Allocate rd_page_count backing pages, carved up into scatterlist
 *	tables of at most max_sg_per_table entries each.
 */
static int rd_build_device_space(struct rd_dev *rd_dev)
{
        u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
        u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));
        struct rd_dev_sg_table *sg_table;
        struct page *pg;
        struct scatterlist *sg;

        if (rd_dev->rd_page_count <= 0) {
                printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
                        rd_dev->rd_page_count);
                return -1;
        }
        total_sg_needed = rd_dev->rd_page_count;

        sg_tables = (total_sg_needed / max_sg_per_table) + 1;

        sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
        if (!sg_table) {
                printk(KERN_ERR "Unable to allocate memory for Ramdisk"
                        " scatterlist tables\n");
                return -1;
        }

        rd_dev->sg_table_array = sg_table;
        rd_dev->sg_table_count = sg_tables;

        while (total_sg_needed) {
                sg_per_table = (total_sg_needed > max_sg_per_table) ?
                        max_sg_per_table : total_sg_needed;

                sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
                                GFP_KERNEL);
                if (!sg) {
                        printk(KERN_ERR "Unable to allocate scatterlist array"
                                " for struct rd_dev\n");
                        return -1;
                }

                sg_init_table(sg, sg_per_table);

                sg_table[i].sg_table = sg;
                sg_table[i].rd_sg_count = sg_per_table;
                sg_table[i].page_start_offset = page_offset;
                sg_table[i++].page_end_offset = (page_offset + sg_per_table)
                                                - 1;

                for (j = 0; j < sg_per_table; j++) {
                        pg = alloc_pages(GFP_KERNEL, 0);
                        if (!pg) {
                                printk(KERN_ERR "Unable to allocate scatterlist"
                                        " pages for struct rd_dev_sg_table\n");
                                return -1;
                        }
                        sg_assign_page(&sg[j], pg);
                        sg[j].length = PAGE_SIZE;
                }

                page_offset += sg_per_table;
                total_sg_needed -= sg_per_table;
        }

        printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
                " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
                rd_dev->rd_dev_id, rd_dev->rd_page_count,
                rd_dev->sg_table_count);

        return 0;
}
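
/*
 * Illustrative note (not part of the original source): the sizing math
 * above, worked through under the assumption that
 * RD_MAX_ALLOCATION_SIZE == 65536 and sizeof(struct scatterlist) == 32
 * on a 64-bit build:
 *
 *	max_sg_per_table = 65536 / 32 = 2048 pages per table
 *
 * For rd_pages=5000, the loop builds three tables covering pages
 * 0-2047, 2048-4095 and 4096-4999.  Note that the "+ 1" in the
 * sg_tables calculation rounds up, so a page count that divides evenly
 * (e.g. 4096) still allocates one extra, unused rd_dev_sg_table entry;
 * it is zeroed by kzalloc() and harmless, merely slightly wasteful.
 */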
static void *rd_allocate_virtdevice(
        struct se_hba *hba,
        const char *name,
        int rd_direct)
{
        struct rd_dev *rd_dev;
        struct rd_host *rd_host = hba->hba_ptr;

        rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
        if (!rd_dev) {
                printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
                return NULL;
        }

        rd_dev->rd_host = rd_host;
        rd_dev->rd_direct = rd_direct;

        return rd_dev;
}

static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
{
        return rd_allocate_virtdevice(hba, name, 1);
}

static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
{
        return rd_allocate_virtdevice(hba, name, 0);
}
/*	rd_create_virtdevice():
 *
 *	Build the backing store and register the device with the generic
 *	target core, under either the DIRECT or MEMCPY template.
 */
static struct se_device *rd_create_virtdevice(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev,
        void *p,
        int rd_direct)
{
        struct se_device *dev;
        struct se_dev_limits dev_limits;
        struct rd_dev *rd_dev = p;
        struct rd_host *rd_host = hba->hba_ptr;
        int dev_flags = 0;
        char prod[16], rev[4];

        memset(&dev_limits, 0, sizeof(struct se_dev_limits));

        if (rd_build_device_space(rd_dev) < 0)
                goto fail;

        snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
        snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
                                                     RD_MCP_VERSION);

        dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
        dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
        dev_limits.limits.max_sectors = RD_MAX_SECTORS;
        dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
        dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;

        dev = transport_add_device_to_core_hba(hba,
                        (rd_dev->rd_direct) ? &rd_dr_template :
                        &rd_mcp_template, se_dev, dev_flags, rd_dev,
                        &dev_limits, prod, rev);
        if (!dev)
                goto fail;

        rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
        rd_dev->rd_queue_depth = dev->queue_depth;

        printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
                " %u pages in %u tables, %lu total bytes\n",
                rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
                "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
                rd_dev->sg_table_count,
                (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

        return dev;

fail:
        rd_release_device_space(rd_dev);
        return NULL;
}
static struct se_device *rd_DIRECT_create_virtdevice(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev,
        void *p)
{
        return rd_create_virtdevice(hba, se_dev, p, 1);
}

static struct se_device *rd_MEMCPY_create_virtdevice(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev,
        void *p)
{
        return rd_create_virtdevice(hba, se_dev, p, 0);
}
/*	rd_free_device(): (Part of se_subsystem_api_t template)
 *
 *	Release the backing pages and then the rd_dev itself.
 */
static void rd_free_device(void *p)
{
        struct rd_dev *rd_dev = p;

        rd_release_device_space(rd_dev);
        kfree(rd_dev);
}

static inline struct rd_request *RD_REQ(struct se_task *task)
{
        return container_of(task, struct rd_request, rd_task);
}

static struct se_task *
rd_alloc_task(struct se_cmd *cmd)
{
        struct rd_request *rd_req;

        rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
        if (!rd_req) {
                printk(KERN_ERR "Unable to allocate struct rd_request\n");
                return NULL;
        }
        rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;

        return &rd_req->rd_task;
}
/*	rd_get_sg_table():
 *
 *	Return the scatterlist table whose [page_start_offset,
 *	page_end_offset] range contains the given device page, or NULL.
 */
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
        u32 i;
        struct rd_dev_sg_table *sg_table;

        for (i = 0; i < rd_dev->sg_table_count; i++) {
                sg_table = &rd_dev->sg_table_array[i];
                if ((sg_table->page_start_offset <= page) &&
                    (sg_table->page_end_offset >= page))
                        return sg_table;
        }

        printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
                        page);

        return NULL;
}
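
/*
 * Illustrative note (not part of the original source): with 2048 pages
 * per table, as in the sizing example above, a request for device page
 * 4100 scans tables 0 (pages 0-2047) and 1 (2048-4095) before matching
 * table 2 (4096-6143).  The lookup is a linear O(sg_table_count) walk,
 * which stays cheap because each table spans 8 MB of ramdisk when
 * PAGE_SIZE is 4 KB.
 */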
/*	rd_MEMCPY_read():
 *
 *	Copy data for a READ from the ramdisk's backing pages into the
 *	task's scatterlist, walking both lists a segment at a time.
 */
static int rd_MEMCPY_read(struct rd_request *req)
{
        struct se_task *task = &req->rd_task;
        struct rd_dev *dev = req->rd_dev;
        struct rd_dev_sg_table *table;
        struct scatterlist *sg_d, *sg_s;
        void *dst, *src;
        u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
        u32 length, page_end = 0, table_sg_end;
        u32 rd_offset = req->rd_offset;

        table = rd_get_sg_table(dev, req->rd_page);
        if (!table)
                return -1;

        table_sg_end = (table->page_end_offset - req->rd_page);
        sg_d = task->task_sg;
        sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_MCP
        printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
                " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
                req->rd_page, req->rd_offset);
#endif
        src_offset = rd_offset;

        while (req->rd_size) {
                if ((sg_d[i].length - dst_offset) <
                    (sg_s[j].length - src_offset)) {
                        length = (sg_d[i].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
                        printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
                                " offset: %u sg_s[%d].length: %u\n", i,
                                &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
                                sg_s[j].length);
                        printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
                                " src_offset: %u\n", length, dst_offset,
                                src_offset);
#endif
                        if (length > req->rd_size)
                                length = req->rd_size;

                        dst = sg_virt(&sg_d[i++]) + dst_offset;
                        if (!dst)
                                BUG();

                        src = sg_virt(&sg_s[j]) + src_offset;
                        if (!src)
                                BUG();

                        dst_offset = 0;
                        src_offset = length;
                        page_end = 0;
                } else {
                        length = (sg_s[j].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
                        printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
                                " offset: %u sg_s[%d].length: %u\n", i,
                                &sg_d[i], sg_d[i].length, sg_d[i].offset,
                                j, sg_s[j].length);
                        printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
                                " src_offset: %u\n", length, dst_offset,
                                src_offset);
#endif
                        if (length > req->rd_size)
                                length = req->rd_size;

                        dst = sg_virt(&sg_d[i]) + dst_offset;
                        if (!dst)
                                BUG();

                        if (sg_d[i].length == length) {
                                i++;
                                dst_offset = 0;
                        } else
                                dst_offset = length;

                        src = sg_virt(&sg_s[j++]) + src_offset;
                        if (!src)
                                BUG();

                        src_offset = 0;
                        page_end = 1;
                }

                memcpy(dst, src, length);

#ifdef DEBUG_RAMDISK_MCP
                printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
                        " i: %u, j: %u\n", req->rd_page,
                        (req->rd_size - length), length, i, j);
#endif
                req->rd_size -= length;
                if (!req->rd_size)
                        return 0;

                if (!page_end)
                        continue;

                if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
                        printk(KERN_INFO "page: %u in same page table\n",
                                req->rd_page);
#endif
                        continue;
                }
#ifdef DEBUG_RAMDISK_MCP
                printk(KERN_INFO "getting new page table for page: %u\n",
                        req->rd_page);
#endif
                table = rd_get_sg_table(dev, req->rd_page);
                if (!table)
                        return -1;

                sg_s = &table->sg_table[j = 0];
        }

        return 0;
}
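
/*
 * Illustrative note (not part of the original source): each pass through
 * the loop above copies the smaller of the space remaining in the
 * current destination segment and the data remaining in the current
 * source page; whichever side is exhausted advances (i for sg_d, j for
 * sg_s).  page_end records whether a *source* page boundary was crossed,
 * which is what may force a hop to the next rd_dev_sg_table.  For
 * example, a 1 KB read starting 3.5 KB into a 4 KB source page copies
 * 512 bytes, advances j (page_end = 1), then copies the remaining
 * 512 bytes from the start of the next page.
 */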
/*	rd_MEMCPY_write():
 *
 *	Mirror image of rd_MEMCPY_read(): copy data for a WRITE from the
 *	task's scatterlist into the ramdisk's backing pages.
 */
static int rd_MEMCPY_write(struct rd_request *req)
{
        struct se_task *task = &req->rd_task;
        struct rd_dev *dev = req->rd_dev;
        struct rd_dev_sg_table *table;
        struct scatterlist *sg_d, *sg_s;
        void *dst, *src;
        u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
        u32 length, page_end = 0, table_sg_end;
        u32 rd_offset = req->rd_offset;

        table = rd_get_sg_table(dev, req->rd_page);
        if (!table)
                return -1;

        table_sg_end = (table->page_end_offset - req->rd_page);
        sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
        sg_s = task->task_sg;
#ifdef DEBUG_RAMDISK_MCP
        printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
                " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
                req->rd_page, req->rd_offset);
#endif
        dst_offset = rd_offset;

        while (req->rd_size) {
                if ((sg_s[i].length - src_offset) <
                    (sg_d[j].length - dst_offset)) {
                        length = (sg_s[i].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
                        printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
                                " offset: %d sg_d[%d].length: %u\n", i,
                                &sg_s[i], sg_s[i].length, sg_s[i].offset,
                                j, sg_d[j].length);
                        printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
                                " dst_offset: %u\n", length, src_offset,
                                dst_offset);
#endif
                        if (length > req->rd_size)
                                length = req->rd_size;

                        src = sg_virt(&sg_s[i++]) + src_offset;
                        if (!src)
                                BUG();

                        dst = sg_virt(&sg_d[j]) + dst_offset;
                        if (!dst)
                                BUG();

                        src_offset = 0;
                        dst_offset = length;
                        page_end = 0;
                } else {
                        length = (sg_d[j].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
                        printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
                                " offset: %d sg_d[%d].length: %u\n", i,
                                &sg_s[i], sg_s[i].length, sg_s[i].offset,
                                j, sg_d[j].length);
                        printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
                                " dst_offset: %u\n", length, src_offset,
                                dst_offset);
#endif
                        if (length > req->rd_size)
                                length = req->rd_size;

                        src = sg_virt(&sg_s[i]) + src_offset;
                        if (!src)
                                BUG();

                        if (sg_s[i].length == length) {
                                i++;
                                src_offset = 0;
                        } else
                                src_offset = length;

                        dst = sg_virt(&sg_d[j++]) + dst_offset;
                        if (!dst)
                                BUG();

                        dst_offset = 0;
                        page_end = 1;
                }

                memcpy(dst, src, length);

#ifdef DEBUG_RAMDISK_MCP
                printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
                        " i: %u, j: %u\n", req->rd_page,
                        (req->rd_size - length), length, i, j);
#endif
                req->rd_size -= length;
                if (!req->rd_size)
                        return 0;

                if (!page_end)
                        continue;

                if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
                        printk(KERN_INFO "page: %u in same page table\n",
                                req->rd_page);
#endif
                        continue;
                }
#ifdef DEBUG_RAMDISK_MCP
                printk(KERN_INFO "getting new page table for page: %u\n",
                        req->rd_page);
#endif
                table = rd_get_sg_table(dev, req->rd_page);
                if (!table)
                        return -1;

                sg_d = &table->sg_table[j = 0];
        }

        return 0;
}
/*	rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
 *
 *	Translate the task's LBA into a ramdisk page and byte offset, then
 *	dispatch to the read or write memcpy path.
 */
static int rd_MEMCPY_do_task(struct se_task *task)
{
        struct se_device *dev = task->se_dev;
        struct rd_request *req = RD_REQ(task);
        unsigned long long lba;
        int ret;

        req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
        lba = task->task_lba;
        req->rd_offset = (do_div(lba,
                          (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
                           DEV_ATTRIB(dev)->block_size;
        req->rd_size = task->task_size;

        if (task->task_data_direction == DMA_FROM_DEVICE)
                ret = rd_MEMCPY_read(req);
        else
                ret = rd_MEMCPY_write(req);

        if (ret != 0)
                return ret;

        task->task_scsi_status = GOOD;
        transport_complete_task(task, 1);

        return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
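
/*
 * Illustrative note (not part of the original source): the LBA
 * translation above, worked through assuming PAGE_SIZE == 4096 and
 * block_size == 512 (eight blocks per page).  For task_lba == 10:
 *
 *	rd_page   = (10 * 512) / 4096        = 1
 *	rd_offset = (10 % 8) * 512 = 2 * 512 = 1024
 *
 * i.e. the I/O starts 1 KB into the second backing page.  do_div() is
 * used for the modulo because task_lba is 64-bit, and a plain '%' on a
 * u64 would pull in non-existent libgcc helpers on 32-bit kernels.
 */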
/*	rd_DIRECT_with_offset():
 *
 *	Map the ramdisk's own backing pages into struct se_mem elements
 *	for an I/O whose start is not page aligned.
 */
static int rd_DIRECT_with_offset(
        struct se_task *task,
        struct list_head *se_mem_list,
        u32 *se_mem_cnt,
        u32 *task_offset)
{
        struct rd_request *req = RD_REQ(task);
        struct rd_dev *dev = req->rd_dev;
        struct rd_dev_sg_table *table;
        struct se_mem *se_mem;
        struct scatterlist *sg_s;
        u32 j = 0, set_offset = 1;
        u32 get_next_table = 0, offset_length, table_sg_end;

        table = rd_get_sg_table(dev, req->rd_page);
        if (!table)
                return -1;

        table_sg_end = (table->page_end_offset - req->rd_page);
        sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
        printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
                (task->task_data_direction == DMA_TO_DEVICE) ?
                        "Write" : "Read",
                task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
#endif
        while (req->rd_size) {
                se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
                if (!se_mem) {
                        printk(KERN_ERR "Unable to allocate struct se_mem\n");
                        return -1;
                }
                INIT_LIST_HEAD(&se_mem->se_list);

                if (set_offset) {
                        offset_length = sg_s[j].length - req->rd_offset;
                        if (offset_length > req->rd_size)
                                offset_length = req->rd_size;

                        se_mem->se_page = sg_page(&sg_s[j++]);
                        se_mem->se_off = req->rd_offset;
                        se_mem->se_len = offset_length;

                        set_offset = 0;
                        get_next_table = (j > table_sg_end);
                        goto check_eot;
                }

                offset_length = (req->rd_size < req->rd_offset) ?
                        req->rd_size : req->rd_offset;

                se_mem->se_page = sg_page(&sg_s[j]);
                se_mem->se_len = offset_length;

                set_offset = 1;

check_eot:
#ifdef DEBUG_RAMDISK_DR
                printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
                        " se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
                        req->rd_page, req->rd_size, offset_length, j, se_mem,
                        se_mem->se_page, se_mem->se_off, se_mem->se_len);
#endif
                list_add_tail(&se_mem->se_list, se_mem_list);
                (*se_mem_cnt)++;

                req->rd_size -= offset_length;
                if (!req->rd_size)
                        goto out;

                if (!set_offset && !get_next_table)
                        continue;

                if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
                        printk(KERN_INFO "page: %u in same page table\n",
                                req->rd_page);
#endif
                        continue;
                }
#ifdef DEBUG_RAMDISK_DR
                printk(KERN_INFO "getting new page table for page: %u\n",
                        req->rd_page);
#endif
                table = rd_get_sg_table(dev, req->rd_page);
                if (!table)
                        return -1;

                sg_s = &table->sg_table[j = 0];
        }

out:
        T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
        printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
                        *se_mem_cnt);
#endif
        return 0;
}
/*	rd_DIRECT_without_offset():
 *
 *	Map the ramdisk's own backing pages into struct se_mem elements
 *	for a page-aligned I/O.
 */
static int rd_DIRECT_without_offset(
        struct se_task *task,
        struct list_head *se_mem_list,
        u32 *se_mem_cnt,
        u32 *task_offset)
{
        struct rd_request *req = RD_REQ(task);
        struct rd_dev *dev = req->rd_dev;
        struct rd_dev_sg_table *table;
        struct se_mem *se_mem;
        struct scatterlist *sg_s;
        u32 length, j = 0;

        table = rd_get_sg_table(dev, req->rd_page);
        if (!table)
                return -1;

        sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
        printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
                (task->task_data_direction == DMA_TO_DEVICE) ?
                        "Write" : "Read",
                task->task_lba, req->rd_size, req->rd_page);
#endif
        while (req->rd_size) {
                se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
                if (!se_mem) {
                        printk(KERN_ERR "Unable to allocate struct se_mem\n");
                        return -1;
                }
                INIT_LIST_HEAD(&se_mem->se_list);

                length = (req->rd_size < sg_s[j].length) ?
                        req->rd_size : sg_s[j].length;

                se_mem->se_page = sg_page(&sg_s[j++]);
                se_mem->se_len = length;

#ifdef DEBUG_RAMDISK_DR
                printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
                        " se_page: %p se_off: %u se_len: %u\n", req->rd_page,
                        req->rd_size, j, se_mem, se_mem->se_page,
                        se_mem->se_off, se_mem->se_len);
#endif
                list_add_tail(&se_mem->se_list, se_mem_list);
                (*se_mem_cnt)++;

                req->rd_size -= length;
                if (!req->rd_size)
                        goto out;

                if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
                        printk(KERN_INFO "page: %u in same page table\n",
                                req->rd_page);
#endif
                        continue;
                }
#ifdef DEBUG_RAMDISK_DR
                printk(KERN_INFO "getting new page table for page: %u\n",
                        req->rd_page);
#endif
                table = rd_get_sg_table(dev, req->rd_page);
                if (!table)
                        return -1;

                sg_s = &table->sg_table[j = 0];
        }

out:
        T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
        printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
                        *se_mem_cnt);
#endif
        return 0;
}
/*	rd_DIRECT_do_se_mem_map():
 *
 *	Translate the task's LBA into a ramdisk page and byte offset, then
 *	build the se_mem list directly from the ramdisk's backing pages.
 */
static int rd_DIRECT_do_se_mem_map(
        struct se_task *task,
        struct list_head *se_mem_list,
        void *in_mem,
        struct se_mem *in_se_mem,
        struct se_mem **out_se_mem,
        u32 *se_mem_cnt,
        u32 *task_offset_in)
{
        struct se_cmd *cmd = task->task_se_cmd;
        struct rd_request *req = RD_REQ(task);
        u32 task_offset = *task_offset_in;
        unsigned long long lba;
        int ret;

        req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
                        PAGE_SIZE);
        lba = task->task_lba;
        req->rd_offset = (do_div(lba,
                          (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
                           DEV_ATTRIB(task->se_dev)->block_size;
        req->rd_size = task->task_size;

        if (req->rd_offset)
                ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
                                task_offset_in);
        else
                ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
                                task_offset_in);

        if (ret < 0)
                return ret;

        if (CMD_TFO(cmd)->task_sg_chaining == 0)
                return 0;
        /*
         * Currently prevent writers from multiple HW fabrics doing
         * pci_map_sg() to RD_DR's internal scatterlist memory.
         */
        if (cmd->data_direction == DMA_TO_DEVICE) {
                printk(KERN_ERR "DMA_TO_DEVICE not supported for"
                                " RAMDISK_DR with task_sg_chaining=1\n");
                return -1;
        }
        /*
         * Special case for if task_sg_chaining is enabled, then
         * we set up struct se_task->task_sg[], as it will be used by
         * transport_do_task_sg_chain() for creating chained SGLs
         * across multiple struct se_task->task_sg[].
         */
        if (!transport_calc_sg_num(task,
                        list_entry(T_TASK(cmd)->t_mem_list->next,
                                        struct se_mem, se_list),
                        task_offset))
                return -1;

        return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
                        list_entry(T_TASK(cmd)->t_mem_list->next,
                                        struct se_mem, se_list),
                        out_se_mem, se_mem_cnt, task_offset_in);
}
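
/*
 * Illustrative note (not part of the original source): this mapping is
 * the heart of the DIRECT ("rd_dr") variant.  Instead of memcpy()ing
 * between the task scatterlist and the ramdisk pages as rd_mcp does,
 * the ramdisk's own backing pages are handed to the target core as
 * struct se_mem entries, so fabrics read from and write into ramdisk
 * memory with no extra copy.  That is also why DMA_TO_DEVICE is
 * rejected when task_sg_chaining is enabled: multiple HW fabrics could
 * otherwise pci_map_sg() the same internal pages concurrently, as the
 * comment above notes.
 */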
/*	rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
 *
 *	No data movement is needed here; the work was done when the
 *	ramdisk pages were mapped into the se_mem list.
 */
static int rd_DIRECT_do_task(struct se_task *task)
{
        /*
         * At this point the locally allocated RD tables have been mapped
         * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
         */
        task->task_scsi_status = GOOD;
        transport_complete_task(task, 1);

        return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

/*	rd_free_task(): (Part of se_subsystem_api_t template)
 *
 *	Free the rd_request that embeds this task.
 */
static void rd_free_task(struct se_task *task)
{
        kfree(RD_REQ(task));
}
enum {
        Opt_rd_pages, Opt_err
};

static match_table_t tokens = {
        {Opt_rd_pages, "rd_pages=%d"},
        {Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev,
        const char *page,
        ssize_t count)
{
        struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
        char *orig, *ptr, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, arg, token;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_rd_pages:
                        match_int(args, &arg);
                        rd_dev->rd_page_count = arg;
                        printk(KERN_INFO "RAMDISK: Referencing Page"
                                " Count: %u\n", rd_dev->rd_page_count);
                        rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
                        break;
                default:
                        break;
                }
        }

        kfree(orig);
        return (!ret) ? count : ret;
}
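
/*
 * Illustrative note (not part of the original source): rd_pages is the
 * only recognized parameter, and it is normally written through the
 * device's configfs control file.  Assuming the standard target
 * configfs mount point, a sketch of setting up an rd_mcp device might
 * look like (exact paths depend on the fabric configuration and the
 * tooling in use):
 *
 *	mkdir -p /sys/kernel/config/target/core/rd_mcp_0/ramdisk0
 *	echo rd_pages=32768 > \
 *		/sys/kernel/config/target/core/rd_mcp_0/ramdisk0/control
 *
 * which requests a 32768-page backing store (128 MB with 4 KB pages);
 * rd_check_configfs_dev_params() below rejects device enable until
 * rd_pages= has been supplied.
 */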
static ssize_t rd_check_configfs_dev_params(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev)
{
        struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;

        if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
                printk(KERN_INFO "Missing rd_pages= parameter\n");
                return -1;
        }

        return 0;
}

static ssize_t rd_show_configfs_dev_params(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev,
        char *b)
{
        struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
        ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n",
                        rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
                        "rd_direct" : "rd_mcp");
        bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
                        " SG_table_count: %u\n", rd_dev->rd_page_count,
                        PAGE_SIZE, rd_dev->sg_table_count);
        return bl;
}
/*	rd_get_cdb(): (Part of se_subsystem_api_t template)
 *
 *	Return the CDB storage embedded in this task's rd_request.
 */
static unsigned char *rd_get_cdb(struct se_task *task)
{
        struct rd_request *req = RD_REQ(task);

        return req->rd_scsi_cdb;
}

static u32 rd_get_device_rev(struct se_device *dev)
{
        return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 rd_get_device_type(struct se_device *dev)
{
        return TYPE_DISK;
}

static sector_t rd_get_blocks(struct se_device *dev)
{
        struct rd_dev *rd_dev = dev->dev_ptr;
        unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
                        DEV_ATTRIB(dev)->block_size) - 1;

        return blocks_long;
}
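
/*
 * Illustrative note (not part of the original source): rd_get_blocks()
 * returns the highest addressable LBA rather than the block count,
 * matching what READ CAPACITY reports.  Assuming 4 KB pages and a
 * 512-byte block size, rd_pages=32768 gives:
 *
 *	(32768 * 4096) / 512 - 1 = 262144 - 1 = 262143
 *
 * i.e. LBAs 0 through 262143 for a 128 MB device.
 */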
static struct se_subsystem_api rd_dr_template = {
        .name                   = "rd_dr",
        .transport_type         = TRANSPORT_PLUGIN_VHBA_VDEV,
        .attach_hba             = rd_attach_hba,
        .detach_hba             = rd_detach_hba,
        .allocate_virtdevice    = rd_DIRECT_allocate_virtdevice,
        .create_virtdevice      = rd_DIRECT_create_virtdevice,
        .free_device            = rd_free_device,
        .alloc_task             = rd_alloc_task,
        .do_task                = rd_DIRECT_do_task,
        .free_task              = rd_free_task,
        .check_configfs_dev_params = rd_check_configfs_dev_params,
        .set_configfs_dev_params = rd_set_configfs_dev_params,
        .show_configfs_dev_params = rd_show_configfs_dev_params,
        .get_cdb                = rd_get_cdb,
        .get_device_rev         = rd_get_device_rev,
        .get_device_type        = rd_get_device_type,
        .get_blocks             = rd_get_blocks,
        .do_se_mem_map          = rd_DIRECT_do_se_mem_map,
};

static struct se_subsystem_api rd_mcp_template = {
        .name                   = "rd_mcp",
        .transport_type         = TRANSPORT_PLUGIN_VHBA_VDEV,
        .attach_hba             = rd_attach_hba,
        .detach_hba             = rd_detach_hba,
        .allocate_virtdevice    = rd_MEMCPY_allocate_virtdevice,
        .create_virtdevice      = rd_MEMCPY_create_virtdevice,
        .free_device            = rd_free_device,
        .alloc_task             = rd_alloc_task,
        .do_task                = rd_MEMCPY_do_task,
        .free_task              = rd_free_task,
        .check_configfs_dev_params = rd_check_configfs_dev_params,
        .set_configfs_dev_params = rd_set_configfs_dev_params,
        .show_configfs_dev_params = rd_show_configfs_dev_params,
        .get_cdb                = rd_get_cdb,
        .get_device_rev         = rd_get_device_rev,
        .get_device_type        = rd_get_device_type,
        .get_blocks             = rd_get_blocks,
};
int __init rd_module_init(void)
{
        int ret;

        ret = transport_subsystem_register(&rd_dr_template);
        if (ret < 0)
                return ret;

        ret = transport_subsystem_register(&rd_mcp_template);
        if (ret < 0) {
                transport_subsystem_release(&rd_dr_template);
                return ret;
        }

        return 0;
}

void rd_module_exit(void)
{
        transport_subsystem_release(&rd_dr_template);
        transport_subsystem_release(&rd_mcp_template);
}