/*******************************************************************************
 * Filename: target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);

static struct se_hba *lun0_hba;
static struct se_subsystem_dev *lun0_su_dev;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;
		deve->total_bytes += se_cmd->data_length;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			printk(KERN_INFO "TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return -EACCES;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		deve->deve_cmds++;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return -ENODEV;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			return -EACCES;
		}

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	/* TODO: get rid of this and use atomics for stats */
	dev = se_lun->lun_se_dev;
	spin_lock_irqsave(&dev->stats_lock, flags);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irqrestore(&dev->stats_lock, flags);

	/*
	 * Add the iscsi_cmd_t to the struct se_lun's cmd list. This list is used
	 * for tracking state of struct se_cmds during LUN shutdown events.
	 */
	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
	atomic_set(&se_cmd->transport_lun_active, 1);
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
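
/*
 * Example (hypothetical fabric-module sketch, not part of this file):
 * a fabric driver typically resolves the LUN right after initializing a
 * struct se_cmd from an incoming CDB, and turns a lookup failure into a
 * check condition using the scsi_sense_reason set above:
 *
 *	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
 *		transport_send_check_condition_and_sense(se_cmd,
 *				se_cmd->scsi_sense_reason, 0);
 *		return;
 *	}
 */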
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = deve->se_lun->lun_se_dev;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
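
/*
 * Example (hypothetical fabric-module sketch, not part of this file):
 * the TMR path mirrors the command path; a fabric driver allocates the
 * struct se_tmr_req first and only then resolves the LUN:
 *
 *	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr,
 *				TMR_LUN_RESET);
 *	if (IS_ERR(se_cmd->se_tmr_req))
 *		goto fail;
 *	if (transport_lookup_tmr_lun(se_cmd, unpacked_lun) < 0)
 *		goto fail;
 */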
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!(lun)) {
			printk(KERN_ERR "%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!(port)) {
			printk(KERN_ERR "%s device entries port pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}
int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			printk(KERN_ERR "%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	kfree(nacl->device_list);
	nacl->device_list = NULL;

	return 0;
}
void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&se_nacl->device_list_lock);
	deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
	deve->deve_cmds--;
	spin_unlock_irq(&se_nacl->device_list_lock);
}
void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = &nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}
/* core_update_device_list_for_node():
 *
 *
 */
int core_update_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg,
	int enable)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
	int trans = 0;
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 */
	if (!(enable)) {
		/*
		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
		 * that have not been explicitly converted to MappedLUNs ->
		 * struct se_lun_acl, but we remove deve->alua_port_list from
		 * port->sep_alua_list. This also means that active UAs and
		 * NodeACL context specific PR metadata for demo-mode
		 * MappedLUN *deve will be released below..
		 */
		spin_lock_bh(&port->sep_alua_lock);
		list_del(&deve->alua_port_list);
		spin_unlock_bh(&port->sep_alua_lock);
	}

	spin_lock_irq(&nacl->device_list_lock);
	if (enable) {
		/*
		 * Check if the call is handling demo mode -> explicit LUN ACL
		 * transition. This transition must be for the same struct se_lun
		 * + mapped_lun that was setup in demo mode..
		 */
		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
			if (deve->se_lun_acl != NULL) {
				printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
					" already set for demo mode -> explicit"
					" LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -EINVAL;
			}
			if (deve->se_lun != lun) {
				printk(KERN_ERR "struct se_dev_entry->se_lun does"
					" not match passed struct se_lun for demo"
					" mode -> explicit LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -EINVAL;
			}
			deve->se_lun_acl = lun_acl;
			trans = 1;
		} else {
			deve->se_lun = lun;
			deve->se_lun_acl = lun_acl;
			deve->mapped_lun = mapped_lun;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
		}

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		if (trans) {
			spin_unlock_irq(&nacl->device_list_lock);
			return 0;
		}
		deve->creation_time = get_jiffies_64();
		deve->attach_count++;
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&port->sep_alua_lock);
		list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
		spin_unlock_bh(&port->sep_alua_lock);

		return 0;
	}
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	spin_unlock_irq(&nacl->device_list_lock);
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();
	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}
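
/*
 * Example (illustrative call sequence, not part of this file): a
 * demo-mode initiator is first mapped with lun_acl = NULL, and a later
 * explicit configfs MappedLUN creation re-uses the same deve:
 *
 *	core_update_device_list_for_node(lun, NULL, mapped_lun,
 *			TRANSPORT_LUNFLAGS_READ_WRITE, nacl, tpg, 1);
 *	...
 *	core_update_device_list_for_node(lun, lacl, mapped_lun,
 *			TRANSPORT_LUNFLAGS_READ_WRITE, nacl, tpg, 1);
 *
 * The second call takes the trans = 1 path above, attaching the
 * struct se_lun_acl without re-adding deve->alua_port_list.
 */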
/* core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_bh(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_bh(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = &nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_update_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg, 0);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&tpg->acl_node_lock);
	}
	spin_unlock_bh(&tpg->acl_node_lock);
}
static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!(port)) {
		printk(KERN_ERR "Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		printk(KERN_WARNING "Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		kfree(port);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code			Description
	 * 0h			Reserved
	 * 1h			Relative port 1, historically known as port A
	 * 2h			Relative port 2, historically known as port B
	 * 3h to FFFFh		Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!(port->sep_rtpi))
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}
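
/*
 * Example (illustrative): once dev_rpti_counter wraps past 0xffff the
 * increment yields 0, which the !(port->sep_rtpi) check rejects since
 * RTPI 0h is reserved; the allocator then retries, and the scan of
 * dev_sep_list skips any identifier still in use after the 16-bit wrap.
 */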
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
				"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			su_dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}
/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	while (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}
int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;
	se_dev_start(dev);

	atomic_inc(&dev->dev_export_obj.obj_access_count);
	core_export_port(dev, tpg, port, lun);
	return 0;
}
void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	atomic_dec(&dev->dev_export_obj.obj_access_count);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	se_dev_stop(dev);
	lun->lun_se_dev = NULL;
}
int transport_core_report_lun_response(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun;
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned char *buf = se_cmd->t_task_buf;
	u32 cdb_offset = 0, lun_count = 0, offset = 8, i;

	if (list_empty(&se_cmd->t_task_list)) {
		printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD. In that case, report LUN=0 only.
	 */
	if (!(se_sess)) {
		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &se_sess->se_node_acl->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		se_lun = deve->se_lun;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC-2 r20, section 7.19.
		 */
		lun_count++;
		if ((cdb_offset + 8) >= se_cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
		offset += 8;
		cdb_offset += 8;
	}
	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

	/*
	 * See SPC-3 r07, page 159.
	 */
done:
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
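
/*
 * Example (worked encoding): with two mapped LUNs reported,
 * lun_count * 8 = 16, so the LUN LIST LENGTH header becomes
 * buf[0..3] = { 0x00, 0x00, 0x00, 0x10 }, and the two 8-byte LUN
 * entries occupy buf[8..15] and buf[16..23].
 */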
/* se_release_device_for_hba():
 *
 *
 */
void se_release_device_for_hba(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
		se_dev_stop(dev);

	if (dev->dev_ptr) {
		kthread_stop(dev->process_thread);
		if (dev->transport->free_device)
			dev->transport->free_device(dev->dev_ptr);
	}

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	kfree(dev);
}
void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
}
/* se_free_virtual_device():
 *
 * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
	if (!list_empty(&dev->dev_sep_list))
		dump_stack();

	core_alua_free_lu_gp_mem(dev);
	se_release_device_for_hba(dev);

	return 0;
}
static void se_dev_start(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_inc(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
			dev->dev_status &=
				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

static void se_dev_stop(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_dec(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}
int se_dev_check_online(struct se_device *dev)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->dev_status_lock, flags);
	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
	spin_unlock_irqrestore(&dev->dev_status_lock, flags);

	return ret;
}

int se_dev_check_shutdown(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}
void se_dev_set_default_attribs(
	struct se_device *dev,
	struct se_dev_limits *dev_limits)
{
	struct queue_limits *limits = &dev_limits->limits;

	dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
	dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	/*
	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
	 * iblock_create_virtdevice() from struct queue_limits values
	 * if blk_queue_discard()==1
	 */
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	/*
	 * block_size is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
	dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
	/*
	 * max_sectors is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
	/*
	 * Set optimal_sectors from max_sectors, which can be lowered via
	 * configfs.
	 */
	dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
	/*
	 * queue_depth is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
	dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
}
int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
{
	if (task_timeout > DA_TASK_TIMEOUT_MAX) {
		printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than"
			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
		return -EINVAL;
	} else {
		dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
		printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
			dev, task_timeout);
	}
	return 0;
}
int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
	printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
	return 0;
}
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->dpo_emulated == NULL) {
		printk(KERN_ERR "dev->transport->dpo_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->dpo_emulated(dev) == 0) {
		printk(KERN_ERR "dev->transport->dpo_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
	printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
		" bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);
	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->fua_write_emulated == NULL) {
		printk(KERN_ERR "dev->transport->fua_write_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->fua_write_emulated(dev) == 0) {
		printk(KERN_ERR "dev->transport->fua_write_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->fua_read_emulated == NULL) {
		printk(KERN_ERR "dev->transport->fua_read_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->fua_read_emulated(dev) == 0) {
		printk(KERN_ERR "dev->transport->fua_read_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);
	return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->write_cache_emulated == NULL) {
		printk(KERN_ERR "dev->transport->write_cache_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->write_cache_emulated(dev) == 0) {
		printk(KERN_ERR "dev->transport->write_cache_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
	printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
	return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
			" exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
	printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
	printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
	return 0;
}
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) {
		printk(KERN_ERR "Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) {
		printk(KERN_ERR "Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
		dev, flag);
	return 0;
}
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
	printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}
/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	u32 orig_queue_depth = dev->queue_depth;

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!(queue_depth)) {
		printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
			printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
			if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
				printk(KERN_ERR "dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	if (queue_depth > orig_queue_depth)
		atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
	else if (queue_depth < orig_queue_depth)
		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);

	printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
		dev, queue_depth);
	return 0;
}
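
/*
 * Example (illustrative): raising queue_depth from 16 to 32 on an
 * otherwise idle device executes atomic_add(16, &dev->depth_left), so
 * 16 additional commands may be dispatched before the queue is
 * considered full; lowering it performs the matching atomic_sub().
 */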
int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
{
	int force = 0; /* Force setting for VDEVS */

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
			" max_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!(max_sectors)) {
		printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
			" max_sectors\n", dev);
		return -EINVAL;
	}
	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, max_sectors,
				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (!(force) && (max_sectors >
			dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors"
				": %u, use force=1 to override.\n", dev,
				max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}

	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
	printk(KERN_INFO "dev[%p]: SE Device max_sectors changed to %u\n",
		dev, max_sectors);
	return 0;
}
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
			" optimal_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
			" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
		printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than max_sectors: %u\n", dev,
			optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
	printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
		dev, optimal_sectors);
	return 0;
}
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
			" while dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		printk(KERN_ERR "dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
			" Physical Device, use Linux/SCSI to change block_size"
			" for the underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.block_size = block_size;
	printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
		dev, block_size);
	return 0;
}
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_hba *hba,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	u32 lun_access = 0;

	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
		printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
			atomic_read(&dev->dev_access_obj.obj_access_count));
		return NULL;
	}

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if (IS_ERR(lun_p) || !(lun_p))
		return NULL;

	if (dev->dev_flags & DF_READ_ONLY)
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	else
		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;

	if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
		return NULL;

	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_bh(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl) {
				spin_unlock_bh(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_bh(&tpg->acl_node_lock);
			}
		}
		spin_unlock_bh(&tpg->acl_node_lock);
	}

	return lun_p;
}
/* core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;
	int ret = 0;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
	if (!(lun))
		return ret;

	core_tpg_post_dellun(tpg, lun);

	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
/* core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	u32 mapped_lun,
	char *initiatorname,
	int *ret)
{
	struct se_lun_acl *lacl;
	struct se_node_acl *nacl;

	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
		printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!(nacl)) {
		*ret = -EINVAL;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!(lacl)) {
		printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

	return lacl;
}
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!(lun)) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!(nacl))
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg, 1) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}
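
/*
 * Example (illustrative call sequence from a fabric configfs path, not
 * part of this file): MappedLUN setup is a two-step operation, first
 * allocating the ACL, then wiring it to an active LUN:
 *
 *	lacl = core_dev_init_initiator_node_lun_acl(tpg, mapped_lun,
 *			initiatorname, &ret);
 *	if (!lacl)
 *		return ret;
 *	ret = core_dev_add_initiator_node_lun_acl(tpg, lacl,
 *			unpacked_lun, TRANSPORT_LUNFLAGS_READ_WRITE);
 */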
/* core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!(nacl))
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

	lacl->se_lun = NULL;

	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}
void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	printk(KERN_INFO "%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	struct se_subsystem_dev *se_dev = NULL;
	struct se_subsystem_api *t;
	char buf[16];
	int ret;

	hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	lun0_hba = hba;
	t = hba->transport;

	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
	if (!(se_dev)) {
		printk(KERN_ERR "Unable to allocate memory for"
			" struct se_subsystem_dev\n");
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&se_dev->se_dev_node);
	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&se_dev->t10_pr.registration_lock);
	spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
	spin_lock_init(&se_dev->se_dev_lock);
	se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
	se_dev->t10_wwn.t10_sub_dev = se_dev;
	se_dev->t10_alua.t10_sub_dev = se_dev;
	se_dev->se_dev_attrib.da_sub_dev = se_dev;
	se_dev->se_dev_hba = hba;

	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
	if (!(se_dev->se_dev_su_ptr)) {
		printk(KERN_ERR "Unable to locate subsystem dependent pointer"
			" from allocate_virtdevice()\n");
		ret = -ENOMEM;
		goto out;
	}
	lun0_su_dev = se_dev;

	memset(buf, 0, 16);
	sprintf(buf, "rd_pages=8");
	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));

	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	se_dev->se_dev_ptr = dev;
	g_lun0_dev = dev;

	return 0;
out:
	lun0_su_dev = NULL;
	kfree(se_dev);
	if (lun0_hba) {
		core_delete_hba(lun0_hba);
		lun0_hba = NULL;
	}
	return ret;
}
void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;
	struct se_subsystem_dev *su_dev = lun0_su_dev;

	if (!(hba))
		return;

	if (g_lun0_dev)
		se_free_virtual_device(g_lun0_dev, hba);

	kfree(su_dev);
	core_delete_hba(hba);
}