/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);

static struct se_hba *lun0_hba;
static struct se_subsystem_dev *lun0_su_dev;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
    struct se_lun *se_lun = NULL;
    struct se_session *se_sess = se_cmd->se_sess;
    struct se_device *dev;
    unsigned long flags;

    if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
        se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
        se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
        return -ENODEV;
    }

    spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
    se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
    if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
        struct se_dev_entry *deve = se_cmd->se_deve;

        deve->total_cmds++;
        deve->total_bytes += se_cmd->data_length;

        if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
            (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
            se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
            se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
            pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
                " Access for 0x%08x\n",
                se_cmd->se_tfo->get_fabric_name(),
                unpacked_lun);
            spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
            return -EACCES;
        }

        if (se_cmd->data_direction == DMA_TO_DEVICE)
            deve->write_bytes += se_cmd->data_length;
        else if (se_cmd->data_direction == DMA_FROM_DEVICE)
            deve->read_bytes += se_cmd->data_length;

        deve->deve_cmds++;

        se_lun = deve->se_lun;
        se_cmd->se_lun = deve->se_lun;
        se_cmd->pr_res_key = deve->pr_res_key;
        se_cmd->orig_fe_lun = unpacked_lun;
        se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
    }
    spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

    if (!se_lun) {
        /*
         * Use the se_portal_group->tpg_virt_lun0 to allow for
         * REPORT_LUNS, et al to be returned when no active
         * MappedLUN=0 exists for this Initiator Port.
         */
        if (unpacked_lun != 0) {
            se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
            se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
            pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                " Access for 0x%08x\n",
                se_cmd->se_tfo->get_fabric_name(),
                unpacked_lun);
            return -ENODEV;
        }
        /*
         * Force WRITE PROTECT for virtual LUN 0
         */
        if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
            (se_cmd->data_direction != DMA_NONE)) {
            se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
            se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
            return -EACCES;
        }

        se_lun = &se_sess->se_tpg->tpg_virt_lun0;
        se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
        se_cmd->orig_fe_lun = 0;
        se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
    }
    /*
     * Determine if the struct se_lun is online.
     * FIXME: Check for LUN_RESET + UNIT Attention
     */
    if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
        se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
        se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
        return -ENODEV;
    }

    /* Directly associate cmd with se_dev */
    se_cmd->se_dev = se_lun->lun_se_dev;

    /* TODO: get rid of this and use atomics for stats */
    dev = se_lun->lun_se_dev;
    spin_lock_irqsave(&dev->stats_lock, flags);
    dev->num_cmds++;
    if (se_cmd->data_direction == DMA_TO_DEVICE)
        dev->write_bytes += se_cmd->data_length;
    else if (se_cmd->data_direction == DMA_FROM_DEVICE)
        dev->read_bytes += se_cmd->data_length;
    spin_unlock_irqrestore(&dev->stats_lock, flags);

    /*
     * Add the se_cmd to the struct se_lun's cmd list.  This list is used
     * for tracking state of struct se_cmds during LUN shutdown events.
     */
    spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
    list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
    atomic_set(&se_cmd->transport_lun_active, 1);
    spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

    return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
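
/*
 * Illustrative usage (not part of this file): a fabric module that has
 * initialized a struct se_cmd would typically resolve the LUN before
 * submitting I/O, along the lines of:
 *
 *	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
 *		// se_cmd->scsi_sense_reason was set above; the fabric
 *		// would normally build and send a CHECK_CONDITION here.
 *		return;
 *	}
 *
 * Exact error handling is fabric-specific; this is only a sketch.
 */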

int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
    struct se_dev_entry *deve;
    struct se_lun *se_lun = NULL;
    struct se_session *se_sess = se_cmd->se_sess;
    struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
    unsigned long flags;

    if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
        se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
        se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
        return -ENODEV;
    }

    spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
    se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
    deve = se_cmd->se_deve;

    if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
        se_tmr->tmr_lun = deve->se_lun;
        se_cmd->se_lun = deve->se_lun;
        se_lun = deve->se_lun;
        se_cmd->pr_res_key = deve->pr_res_key;
        se_cmd->orig_fe_lun = unpacked_lun;
    }
    spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

    if (!se_lun) {
        pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
            " Access for 0x%08x\n",
            se_cmd->se_tfo->get_fabric_name(),
            unpacked_lun);
        se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
        return -ENODEV;
    }
    /*
     * Determine if the struct se_lun is online.
     * FIXME: Check for LUN_RESET + UNIT Attention
     */
    if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
        se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
        return -ENODEV;
    }

    /* Directly associate cmd with se_dev */
    se_cmd->se_dev = se_lun->lun_se_dev;
    se_tmr->tmr_dev = se_lun->lun_se_dev;

    spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
    list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
    spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

    return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
    struct se_node_acl *nacl,
    u16 rtpi)
{
    struct se_dev_entry *deve;
    struct se_lun *lun;
    struct se_port *port;
    struct se_portal_group *tpg = nacl->se_tpg;
    u32 i;

    spin_lock_irq(&nacl->device_list_lock);
    for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
        deve = &nacl->device_list[i];

        if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
            continue;

        lun = deve->se_lun;
        if (!lun) {
            pr_err("%s device entries device pointer is"
                " NULL, but Initiator has access.\n",
                tpg->se_tpg_tfo->get_fabric_name());
            continue;
        }
        port = lun->lun_sep;
        if (!port) {
            pr_err("%s device entries device pointer is"
                " NULL, but Initiator has access.\n",
                tpg->se_tpg_tfo->get_fabric_name());
            continue;
        }
        if (port->sep_rtpi != rtpi)
            continue;

        atomic_inc(&deve->pr_ref_count);
        smp_mb__after_atomic_inc();
        spin_unlock_irq(&nacl->device_list_lock);

        return deve;
    }
    spin_unlock_irq(&nacl->device_list_lock);

    return NULL;
}
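
/*
 * Note for callers (a convention sketch, inferred from the comment above):
 * a deve returned here is pinned by pr_ref_count, so the PR code must drop
 * the reference once it is done with the entry, e.g.:
 *
 *	atomic_dec(&deve->pr_ref_count);
 *	smp_mb__after_atomic_dec();
 *
 * core_update_device_list_for_node() below busy-waits on pr_ref_count
 * reaching zero before it will tear the entry down.
 */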

int core_free_device_list_for_node(
    struct se_node_acl *nacl,
    struct se_portal_group *tpg)
{
    struct se_dev_entry *deve;
    struct se_lun *lun;
    u32 i;

    if (!nacl->device_list)
        return 0;

    spin_lock_irq(&nacl->device_list_lock);
    for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
        deve = &nacl->device_list[i];

        if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
            continue;

        if (!deve->se_lun) {
            pr_err("%s device entries device pointer is"
                " NULL, but Initiator has access.\n",
                tpg->se_tpg_tfo->get_fabric_name());
            continue;
        }
        lun = deve->se_lun;

        spin_unlock_irq(&nacl->device_list_lock);
        core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
            TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
        spin_lock_irq(&nacl->device_list_lock);
    }
    spin_unlock_irq(&nacl->device_list_lock);

    kfree(nacl->device_list);
    nacl->device_list = NULL;

    return 0;
}

void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
    struct se_dev_entry *deve;

    spin_lock_irq(&se_nacl->device_list_lock);
    deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
    deve->deve_cmds--;
    spin_unlock_irq(&se_nacl->device_list_lock);
}

void core_update_device_list_access(
    u32 mapped_lun,
    u32 lun_access,
    struct se_node_acl *nacl)
{
    struct se_dev_entry *deve;

    spin_lock_irq(&nacl->device_list_lock);
    deve = &nacl->device_list[mapped_lun];
    if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
    } else {
        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
    }
    spin_unlock_irq(&nacl->device_list_lock);
}
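
/*
 * Note on the flag handling above: TRANSPORT_LUNFLAGS_READ_WRITE and
 * TRANSPORT_LUNFLAGS_READ_ONLY are kept mutually exclusive by always
 * clearing one bit before setting the other, so a deve is never left in
 * both states at once.  core_update_device_list_for_node() below repeats
 * the same pattern when (re)creating a mapping.
 */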

/* core_update_device_list_for_node():
 *
 *
 */
int core_update_device_list_for_node(
    struct se_lun *lun,
    struct se_lun_acl *lun_acl,
    u32 mapped_lun,
    u32 lun_access,
    struct se_node_acl *nacl,
    struct se_portal_group *tpg,
    int enable)
{
    struct se_port *port = lun->lun_sep;
    struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
    int trans = 0;
    /*
     * If the MappedLUN entry is being disabled, the entry in
     * port->sep_alua_list must be removed now before clearing the
     * struct se_dev_entry pointers below as logic in
     * core_alua_do_transition_tg_pt() depends on these being present.
     */
    if (!enable) {
        /*
         * deve->se_lun_acl will be NULL for demo-mode created LUNs
         * that have not been explicitly converted to MappedLUNs ->
         * struct se_lun_acl, but we remove deve->alua_port_list from
         * port->sep_alua_list.  This also means that active UAs and
         * NodeACL context specific PR metadata for demo-mode
         * MappedLUN *deve will be released below..
         */
        spin_lock_bh(&port->sep_alua_lock);
        list_del(&deve->alua_port_list);
        spin_unlock_bh(&port->sep_alua_lock);
    }

    spin_lock_irq(&nacl->device_list_lock);
    if (enable) {
        /*
         * Check if the call is handling demo mode -> explicit LUN ACL
         * transition.  This transition must be for the same struct se_lun
         * + mapped_lun that was setup in demo mode..
         */
        if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
            if (deve->se_lun_acl != NULL) {
                pr_err("struct se_dev_entry->se_lun_acl"
                    " already set for demo mode -> explicit"
                    " LUN ACL transition\n");
                spin_unlock_irq(&nacl->device_list_lock);
                return -EINVAL;
            }
            if (deve->se_lun != lun) {
                pr_err("struct se_dev_entry->se_lun does"
                    " not match passed struct se_lun for demo mode"
                    " -> explicit LUN ACL transition\n");
                spin_unlock_irq(&nacl->device_list_lock);
                return -EINVAL;
            }
            deve->se_lun_acl = lun_acl;
            trans = 1;
        } else {
            deve->se_lun = lun;
            deve->se_lun_acl = lun_acl;
            deve->mapped_lun = mapped_lun;
            deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
        }

        if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
            deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
            deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
        } else {
            deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
            deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
        }

        if (trans) {
            spin_unlock_irq(&nacl->device_list_lock);
            return 0;
        }
        deve->creation_time = get_jiffies_64();
        deve->attach_count++;
        spin_unlock_irq(&nacl->device_list_lock);

        spin_lock_bh(&port->sep_alua_lock);
        list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
        spin_unlock_bh(&port->sep_alua_lock);

        return 0;
    }
    /*
     * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
     * PR operation to complete.
     */
    spin_unlock_irq(&nacl->device_list_lock);
    while (atomic_read(&deve->pr_ref_count) != 0)
        cpu_relax();
    spin_lock_irq(&nacl->device_list_lock);
    /*
     * Disable struct se_dev_entry LUN ACL mapping
     */
    core_scsi3_ua_release_all(deve);
    deve->se_lun = NULL;
    deve->se_lun_acl = NULL;
    deve->lun_flags = 0;
    deve->creation_time = 0;
    deve->attach_count--;
    spin_unlock_irq(&nacl->device_list_lock);

    core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
    return 0;
}
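
/*
 * Summary of the contract above: enable=1 either creates a fresh mapping or
 * upgrades a demo-mode entry to an explicit LUN ACL (the trans=1 path),
 * while enable=0 waits out in-flight PR references, releases Unit
 * Attentions via core_scsi3_ua_release_all(), and clears the se_dev_entry
 * back to NO_ACCESS state.
 */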

/* core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
    struct se_node_acl *nacl;
    struct se_dev_entry *deve;
    u32 i;

    spin_lock_irq(&tpg->acl_node_lock);
    list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
        spin_unlock_irq(&tpg->acl_node_lock);

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
            deve = &nacl->device_list[i];
            if (lun != deve->se_lun)
                continue;
            spin_unlock_irq(&nacl->device_list_lock);

            core_update_device_list_for_node(lun, NULL,
                deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
                nacl, tpg, 0);

            spin_lock_irq(&nacl->device_list_lock);
        }
        spin_unlock_irq(&nacl->device_list_lock);

        spin_lock_irq(&tpg->acl_node_lock);
    }
    spin_unlock_irq(&tpg->acl_node_lock);
}

static struct se_port *core_alloc_port(struct se_device *dev)
{
    struct se_port *port, *port_tmp;

    port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
    if (!port) {
        pr_err("Unable to allocate struct se_port\n");
        return ERR_PTR(-ENOMEM);
    }
    INIT_LIST_HEAD(&port->sep_alua_list);
    INIT_LIST_HEAD(&port->sep_list);
    atomic_set(&port->sep_tg_pt_secondary_offline, 0);
    spin_lock_init(&port->sep_alua_lock);
    mutex_init(&port->sep_tg_pt_md_mutex);

    spin_lock(&dev->se_port_lock);
    if (dev->dev_port_count == 0x0000ffff) {
        pr_warn("Reached dev->dev_port_count =="
            " 0x0000ffff\n");
        spin_unlock(&dev->se_port_lock);
        return ERR_PTR(-ENOSPC);
    }
again:
    /*
     * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device.
     * Here is the table from spc4r17 section 7.7.3.8.
     *
     * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
     *
     * Code			Description
     * 0h			Reserved
     * 1h			Relative port 1, historically known as port A
     * 2h			Relative port 2, historically known as port B
     * 3h to FFFFh		Relative port 3 through 65 535
     */
    port->sep_rtpi = dev->dev_rpti_counter++;
    if (!port->sep_rtpi)
        goto again;

    list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
        /*
         * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
         * for 16-bit wrap..
         */
        if (port->sep_rtpi == port_tmp->sep_rtpi)
            goto again;
    }
    spin_unlock(&dev->se_port_lock);

    return port;
}
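
/*
 * Worked example of the RTPI allocation above (illustrative values): once
 * dev_rpti_counter wraps past 0xffff, the post-increment yields
 * sep_rtpi == 0, which is reserved per SPC-4, so the !port->sep_rtpi test
 * jumps back to `again` and hands out 1 instead.  If that value is already
 * in use by a port on dev_sep_list, the uniqueness scan retries with 2,
 * and so on, until a free non-zero 16-bit identifier is found.
 */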

static void core_export_port(
    struct se_device *dev,
    struct se_portal_group *tpg,
    struct se_port *port,
    struct se_lun *lun)
{
    struct se_subsystem_dev *su_dev = dev->se_sub_dev;
    struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

    spin_lock(&dev->se_port_lock);
    spin_lock(&lun->lun_sep_lock);
    port->sep_tpg = tpg;
    port->sep_lun = lun;
    lun->lun_sep = port;
    spin_unlock(&lun->lun_sep_lock);

    list_add_tail(&port->sep_list, &dev->dev_sep_list);
    spin_unlock(&dev->se_port_lock);

    if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
        tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
        if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
            pr_err("Unable to allocate t10_alua_tg_pt"
                "_gp_member_t\n");
            return;
        }
        spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
        __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
            su_dev->t10_alua.default_tg_pt_gp);
        spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
        pr_debug("%s/%s: Adding to default ALUA Target Port"
            " Group: alua/default_tg_pt_gp\n",
            dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
    }

    dev->dev_port_count++;
    port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
    __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
    /*
     * Wait for any port reference for PR ALL_TG_PT=1 operation
     * to complete in __core_scsi3_alloc_registration()
     */
    spin_unlock(&dev->se_port_lock);
    while (atomic_read(&port->sep_tg_pt_ref_cnt))
        cpu_relax();
    spin_lock(&dev->se_port_lock);

    core_alua_free_tg_pt_gp_mem(port);

    list_del(&port->sep_list);
    dev->dev_port_count--;
    kfree(port);
}

int core_dev_export(
    struct se_device *dev,
    struct se_portal_group *tpg,
    struct se_lun *lun)
{
    struct se_port *port;

    port = core_alloc_port(dev);
    if (IS_ERR(port))
        return PTR_ERR(port);

    lun->lun_se_dev = dev;
    se_dev_start(dev);

    atomic_inc(&dev->dev_export_obj.obj_access_count);
    core_export_port(dev, tpg, port, lun);
    return 0;
}

void core_dev_unexport(
    struct se_device *dev,
    struct se_portal_group *tpg,
    struct se_lun *lun)
{
    struct se_port *port = lun->lun_sep;

    spin_lock(&lun->lun_sep_lock);
    if (lun->lun_se_dev == NULL) {
        spin_unlock(&lun->lun_sep_lock);
        return;
    }
    spin_unlock(&lun->lun_sep_lock);

    spin_lock(&dev->se_port_lock);
    atomic_dec(&dev->dev_export_obj.obj_access_count);
    core_release_port(dev, port);
    spin_unlock(&dev->se_port_lock);

    se_dev_stop(dev);
    lun->lun_se_dev = NULL;
}

int target_report_luns(struct se_task *se_task)
{
    struct se_cmd *se_cmd = se_task->task_se_cmd;
    struct se_dev_entry *deve;
    struct se_lun *se_lun;
    struct se_session *se_sess = se_cmd->se_sess;
    unsigned char *buf;
    u32 cdb_offset = 0, lun_count = 0, offset = 8, i;

    buf = transport_kmap_first_data_page(se_cmd);

    /*
     * If no struct se_session pointer is present, this struct se_cmd is
     * coming via a target_core_mod PASSTHROUGH op, and not through
     * a $FABRIC_MOD.  In that case, report LUN=0 only.
     */
    if (!se_sess) {
        int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
        lun_count = 1;
        goto done;
    }

    spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
    for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
        deve = &se_sess->se_node_acl->device_list[i];
        if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
            continue;
        se_lun = deve->se_lun;
        /*
         * We determine the correct LUN LIST LENGTH even once we
         * have reached the initial allocation length.
         * See SPC2-R20 7.19.
         */
        lun_count++;
        if ((cdb_offset + 8) >= se_cmd->data_length)
            continue;

        int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
        offset += 8;
        cdb_offset += 8;
    }
    spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

    /*
     * See SPC3 r07, page 159.
     */
done:
    /*
     * The LUN LIST LENGTH header must be written while buf is still
     * mapped; only unmap once the header bytes are in place.
     */
    lun_count *= 8;
    buf[0] = ((lun_count >> 24) & 0xff);
    buf[1] = ((lun_count >> 16) & 0xff);
    buf[2] = ((lun_count >> 8) & 0xff);
    buf[3] = (lun_count & 0xff);

    transport_kunmap_first_data_page(se_cmd);

    se_task->task_scsi_status = GOOD;
    transport_complete_task(se_task, 1);
    return 0;
}
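
/*
 * Worked example of the header math above: with three mapped LUNs the loop
 * counts lun_count = 3, so bytes 0-3 encode a LUN LIST LENGTH of 24 (three
 * 8-byte LUN descriptors), and the full payload occupies 8 header bytes
 * plus 24 descriptor bytes.  Descriptors beyond the initiator's allocation
 * length are counted but not written, per the SPC reference above.
 */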

/* se_release_device_for_hba():
 *
 *
 */
void se_release_device_for_hba(struct se_device *dev)
{
    struct se_hba *hba = dev->se_hba;

    if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
        (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
        (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
        (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
        (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
        se_dev_stop(dev);

    if (dev->dev_ptr) {
        kthread_stop(dev->process_thread);
        if (dev->transport->free_device)
            dev->transport->free_device(dev->dev_ptr);
    }

    spin_lock(&hba->device_lock);
    list_del(&dev->dev_list);
    hba->dev_count--;
    spin_unlock(&hba->device_lock);

    core_scsi3_free_all_registrations(dev);
    se_release_vpd_for_dev(dev);

    kfree(dev);
}

void se_release_vpd_for_dev(struct se_device *dev)
{
    struct t10_vpd *vpd, *vpd_tmp;

    spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
    list_for_each_entry_safe(vpd, vpd_tmp,
            &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
        list_del(&vpd->vpd_list);
        kfree(vpd);
    }
    spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
}

/* se_free_virtual_device():
 *
 * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
    if (!list_empty(&dev->dev_sep_list))
        dump_stack();

    core_alua_free_lu_gp_mem(dev);
    se_release_device_for_hba(dev);

    return 0;
}

static void se_dev_start(struct se_device *dev)
{
    struct se_hba *hba = dev->se_hba;

    spin_lock(&hba->device_lock);
    atomic_inc(&dev->dev_obj.obj_access_count);
    if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
        if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
            dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
            dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
        } else if (dev->dev_status &
               TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
            dev->dev_status &=
                ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
            dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
        }
    }
    spin_unlock(&hba->device_lock);
}

static void se_dev_stop(struct se_device *dev)
{
    struct se_hba *hba = dev->se_hba;

    spin_lock(&hba->device_lock);
    atomic_dec(&dev->dev_obj.obj_access_count);
    if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
        if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
            dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
            dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
        } else if (dev->dev_status &
               TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
            dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
            dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
        }
    }
    spin_unlock(&hba->device_lock);
}

int se_dev_check_online(struct se_device *dev)
{
    unsigned long flags;
    int ret;

    spin_lock_irqsave(&dev->dev_status_lock, flags);
    ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
           (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
    spin_unlock_irqrestore(&dev->dev_status_lock, flags);

    return ret;
}

int se_dev_check_shutdown(struct se_device *dev)
{
    int ret;

    spin_lock_irq(&dev->dev_status_lock);
    ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
    spin_unlock_irq(&dev->dev_status_lock);

    return ret;
}

u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
    u32 tmp, aligned_max_sectors;
    /*
     * Limit max_sectors to a PAGE_SIZE aligned value for modern
     * transport_allocate_data_tasks() operation.
     */
    tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
    aligned_max_sectors = (tmp / block_size);
    if (max_sectors != aligned_max_sectors) {
        printk(KERN_INFO "Rounding down aligned max_sectors from %u"
            " to %u\n", max_sectors, aligned_max_sectors);
        return aligned_max_sectors;
    }
    return max_sectors;
}
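
/*
 * Worked example for se_dev_align_max_sectors() with illustrative values:
 * max_sectors = 1025, block_size = 512, PAGE_SIZE = 4096.
 * 1025 * 512 = 524800; rounddown(524800, 4096) = 524288;
 * 524288 / 512 = 1024, so the function logs the rounding and returns 1024.
 * An already page-aligned value (e.g. 1024) is returned unchanged.
 */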

void se_dev_set_default_attribs(
    struct se_device *dev,
    struct se_dev_limits *dev_limits)
{
    struct queue_limits *limits = &dev_limits->limits;

    dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
    dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
    dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
    dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
    dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
    dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
    dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
    dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
    dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
    dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
    dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
    dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
    dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
    /*
     * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
     * iblock_create_virtdevice() from struct queue_limits values
     * if blk_queue_discard()==1
     */
    dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
    dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
        DA_MAX_UNMAP_BLOCK_DESC_COUNT;
    dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
    dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
        DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
    /*
     * block_size is based on subsystem plugin dependent requirements.
     */
    dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
    dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
    /*
     * max_sectors is based on subsystem plugin dependent requirements.
     */
    dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
    /*
     * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
     */
    limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
        limits->logical_block_size);
    dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
    /*
     * Set optimal_sectors from max_sectors, which can be lowered via
     * configfs.
     */
    dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
    /*
     * queue_depth is based on subsystem plugin dependent requirements.
     */
    dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
    dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
}

int se_dev_set_max_unmap_lba_count(
    struct se_device *dev,
    u32 max_unmap_lba_count)
{
    dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
    pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
        dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
    return 0;
}

int se_dev_set_max_unmap_block_desc_count(
    struct se_device *dev,
    u32 max_unmap_block_desc_count)
{
    dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
        max_unmap_block_desc_count;
    pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
        dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
    return 0;
}

int se_dev_set_unmap_granularity(
    struct se_device *dev,
    u32 unmap_granularity)
{
    dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
    pr_debug("dev[%p]: Set unmap_granularity: %u\n",
        dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
    return 0;
}

int se_dev_set_unmap_granularity_alignment(
    struct se_device *dev,
    u32 unmap_granularity_alignment)
{
    dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
    pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
        dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
    return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
    if (flag != 0 && flag != 1) {
        pr_err("Illegal value %d\n", flag);
        return -EINVAL;
    }
    if (flag) {
        pr_err("dpo_emulated not supported\n");
        return -EINVAL;
    }
    return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
    if (flag != 0 && flag != 1) {
        pr_err("Illegal value %d\n", flag);
        return -EINVAL;
    }
    if (flag && dev->transport->fua_write_emulated == 0) {
        pr_err("fua_write_emulated not supported\n");
        return -EINVAL;
    }
    dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
    pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
        dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
    return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
    if (flag != 0 && flag != 1) {
        pr_err("Illegal value %d\n", flag);
        return -EINVAL;
    }
    if (flag) {
        pr_err("fua_read emulated not supported\n");
        return -EINVAL;
    }
    return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
    if (flag != 0 && flag != 1) {
        pr_err("Illegal value %d\n", flag);
        return -EINVAL;
    }
    if (flag && dev->transport->write_cache_emulated == 0) {
        pr_err("write_cache_emulated not supported\n");
        return -EINVAL;
    }
    dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
    pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
        dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
    return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
    if ((flag != 0) && (flag != 1) && (flag != 2)) {
        pr_err("Illegal value %d\n", flag);
        return -EINVAL;
    }
    if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
        pr_err("dev[%p]: Unable to change SE Device"
            " UA_INTRLCK_CTRL while dev_export_obj: %d count"
            " exists\n", dev,
            atomic_read(&dev->dev_export_obj.obj_access_count));
        return -EINVAL;
    }
    dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
    pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
        dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
    return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
    if ((flag != 0) && (flag != 1)) {
        pr_err("Illegal value %d\n", flag);
        return -EINVAL;
    }
    if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
        pr_err("dev[%p]: Unable to change SE Device TAS while"
            " dev_export_obj: %d count exists\n", dev,
            atomic_read(&dev->dev_export_obj.obj_access_count));
        return -EINVAL;
    }
    dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
    pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
        dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
    return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
    if ((flag != 0) && (flag != 1)) {
        pr_err("Illegal value %d\n", flag);
        return -EINVAL;
    }
    /*
     * We expect this value to be non-zero when generic Block Layer
     * Discard support is detected in iblock_create_virtdevice().
     */
    if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
        pr_err("Generic Block Discard not supported\n");
        return -ENOSYS;
    }
    dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
    pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
        dev, flag);
    return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
    if ((flag != 0) && (flag != 1)) {
        pr_err("Illegal value %d\n", flag);
        return -EINVAL;
    }
    /*
     * We expect this value to be non-zero when generic Block Layer
     * Discard support is detected in iblock_create_virtdevice().
     */
    if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
        pr_err("Generic Block Discard not supported\n");
        return -ENOSYS;
    }
    dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
    pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
        dev, flag);
    return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
    if ((flag != 0) && (flag != 1)) {
        pr_err("Illegal value %d\n", flag);
        return -EINVAL;
    }
    dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
    pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
        (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
    return 0;
}

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
    if ((flag != 0) && (flag != 1)) {
        printk(KERN_ERR "Illegal value %d\n", flag);
        return -EINVAL;
    }
    dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
    pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
        dev, flag);
    return 0;
}

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
    if (flag != 0) {
        printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
            " reordering not implemented\n", dev);
        return -ENOSYS;
    }
    dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
    pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
    return 0;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
    u32 orig_queue_depth = dev->queue_depth;

    if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
        pr_err("dev[%p]: Unable to change SE Device TCQ while"
            " dev_export_obj: %d count exists\n", dev,
            atomic_read(&dev->dev_export_obj.obj_access_count));
        return -EINVAL;
    }
    if (!queue_depth) {
        pr_err("dev[%p]: Illegal ZERO value for queue"
            "_depth\n", dev);
        return -EINVAL;
    }

    if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
        if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
            pr_err("dev[%p]: Passed queue_depth: %u"
                " exceeds TCM/SE_Device TCQ: %u\n",
                dev, queue_depth,
                dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
            return -EINVAL;
        }
    } else {
        if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
            if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
                pr_err("dev[%p]: Passed queue_depth:"
                    " %u exceeds TCM/SE_Device MAX"
                    " TCQ: %u\n", dev, queue_depth,
                    dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
                return -EINVAL;
            }
        }
    }

    dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
    if (queue_depth > orig_queue_depth)
        atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
    else if (queue_depth < orig_queue_depth)
        atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);

    pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
        dev, queue_depth);

    return 0;
}
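
/*
 * Example of the depth_left adjustment above (illustrative numbers): if the
 * original queue_depth was 32 and the new value is 48, atomic_add() grants
 * 16 additional command credits to dev->depth_left; shrinking from 32 to 16
 * would instead atomic_sub() 16 credits.
 */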

int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
{
    int force = 0; /* Force setting for VDEVS */

    if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
        pr_err("dev[%p]: Unable to change SE Device"
            " max_sectors while dev_export_obj: %d count exists\n",
            dev, atomic_read(&dev->dev_export_obj.obj_access_count));
        return -EINVAL;
    }
    if (!max_sectors) {
        pr_err("dev[%p]: Illegal ZERO value for"
            " max_sectors\n", dev);
        return -EINVAL;
    }
    if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
        pr_err("dev[%p]: Passed max_sectors: %u less than"
            " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
            DA_STATUS_MAX_SECTORS_MIN);
        return -EINVAL;
    }
    if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
        if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
            pr_err("dev[%p]: Passed max_sectors: %u"
                " greater than TCM/SE_Device max_sectors:"
                " %u\n", dev, max_sectors,
                dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
            return -EINVAL;
        }
    } else {
        if (!force && (max_sectors >
                 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
            pr_err("dev[%p]: Passed max_sectors: %u"
                " greater than TCM/SE_Device max_sectors"
                ": %u, use force=1 to override.\n", dev,
                max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
            return -EINVAL;
        }
        if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
            pr_err("dev[%p]: Passed max_sectors: %u"
                " greater than DA_STATUS_MAX_SECTORS_MAX:"
                " %u\n", dev, max_sectors,
                DA_STATUS_MAX_SECTORS_MAX);
            return -EINVAL;
        }
    }
    /*
     * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
     */
    max_sectors = se_dev_align_max_sectors(max_sectors,
        dev->se_sub_dev->se_dev_attrib.block_size);

    dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
    pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
        dev, max_sectors);
    return 0;
}

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
    if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
        pr_err("dev[%p]: Unable to change SE Device"
            " optimal_sectors while dev_export_obj: %d count exists\n",
            dev, atomic_read(&dev->dev_export_obj.obj_access_count));
        return -EINVAL;
    }
    if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
        pr_err("dev[%p]: Passed optimal_sectors cannot be"
            " changed for TCM/pSCSI\n", dev);
        return -EINVAL;
    }
    if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
        pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
            " greater than max_sectors: %u\n", dev,
            optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
        return -EINVAL;
    }

    dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
    pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
        dev, optimal_sectors);
    return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
    if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
        pr_err("dev[%p]: Unable to change SE Device block_size"
            " while dev_export_obj: %d count exists\n", dev,
            atomic_read(&dev->dev_export_obj.obj_access_count));
        return -EINVAL;
    }
    if ((block_size != 512) &&
        (block_size != 1024) &&
        (block_size != 2048) &&
        (block_size != 4096)) {
        pr_err("dev[%p]: Illegal value for block_size: %u"
            " for SE device, must be 512, 1024, 2048 or 4096\n",
            dev, block_size);
        return -EINVAL;
    }
    if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
        pr_err("dev[%p]: Not allowed to change block_size for"
            " Physical Device, use for Linux/SCSI to change"
            " block_size for underlying hardware\n", dev);
        return -EINVAL;
    }

    dev->se_sub_dev->se_dev_attrib.block_size = block_size;
    pr_debug("dev[%p]: SE Device block_size changed to %u\n",
        dev, block_size);
    return 0;
}

struct se_lun *core_dev_add_lun(
    struct se_portal_group *tpg,
    struct se_hba *hba,
    struct se_device *dev,
    u32 lun)
{
    struct se_lun *lun_p;
    u32 lun_access = 0;

    if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
        pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
            atomic_read(&dev->dev_access_obj.obj_access_count));
        return NULL;
    }

    lun_p = core_tpg_pre_addlun(tpg, lun);
    if ((IS_ERR(lun_p)) || !lun_p)
        return NULL;

    if (dev->dev_flags & DF_READ_ONLY)
        lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
    else
        lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;

    if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
        return NULL;

    pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
        " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
        tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
        tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
    /*
     * Update LUN maps for dynamically added initiators when
     * generate_node_acl is enabled.
     */
    if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
        struct se_node_acl *acl;

        spin_lock_irq(&tpg->acl_node_lock);
        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
            if (acl->dynamic_node_acl &&
                (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
                 !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
                spin_unlock_irq(&tpg->acl_node_lock);
                core_tpg_add_node_to_devs(acl, tpg);
                spin_lock_irq(&tpg->acl_node_lock);
            }
        }
        spin_unlock_irq(&tpg->acl_node_lock);
    }

    return lun_p;
}
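
/*
 * Configfs is the usual entry point for core_dev_add_lun(): linking a
 * backstore device into a fabric TPG LUN directory in the target configfs
 * tree ends up here via the fabric configfs code (which lives outside this
 * file), which is also why demo-mode NodeACL maps are refreshed at this
 * point.  This note describes the common path, not the only possible one.
 */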

/* core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
    struct se_portal_group *tpg,
    u32 unpacked_lun)
{
    struct se_lun *lun;
    int ret = 0;

    lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
    if (!lun)
        return ret;

    core_tpg_post_dellun(tpg, lun);

    pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
        " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
        tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
        tpg->se_tpg_tfo->get_fabric_name());

    return 0;
}

struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
    struct se_lun *lun;

    spin_lock(&tpg->tpg_lun_lock);
    if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
        pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
            "_PER_TPG-1: %u for Target Portal Group: %hu\n",
            tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
            TRANSPORT_MAX_LUNS_PER_TPG-1,
            tpg->se_tpg_tfo->tpg_get_tag(tpg));
        spin_unlock(&tpg->tpg_lun_lock);
        return NULL;
    }
    lun = &tpg->tpg_lun_list[unpacked_lun];

    if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
        pr_err("%s Logical Unit Number: %u is not free on"
            " Target Portal Group: %hu, ignoring request.\n",
            tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
            tpg->se_tpg_tfo->tpg_get_tag(tpg));
        spin_unlock(&tpg->tpg_lun_lock);
        return NULL;
    }
    spin_unlock(&tpg->tpg_lun_lock);

    return lun;
}

/* core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
    struct se_lun *lun;

    spin_lock(&tpg->tpg_lun_lock);
    if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
        pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
            "_TPG-1: %u for Target Portal Group: %hu\n",
            tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
            TRANSPORT_MAX_LUNS_PER_TPG-1,
            tpg->se_tpg_tfo->tpg_get_tag(tpg));
        spin_unlock(&tpg->tpg_lun_lock);
        return NULL;
    }
    lun = &tpg->tpg_lun_list[unpacked_lun];

    if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
        pr_err("%s Logical Unit Number: %u is not active on"
            " Target Portal Group: %hu, ignoring request.\n",
            tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
            tpg->se_tpg_tfo->tpg_get_tag(tpg));
        spin_unlock(&tpg->tpg_lun_lock);
        return NULL;
    }
    spin_unlock(&tpg->tpg_lun_lock);

    return lun;
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
    struct se_portal_group *tpg,
    u32 mapped_lun,
    char *initiatorname,
    int *ret)
{
    struct se_lun_acl *lacl;
    struct se_node_acl *nacl;

    if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
        pr_err("%s InitiatorName exceeds maximum size.\n",
            tpg->se_tpg_tfo->get_fabric_name());
        *ret = -EOVERFLOW;
        return NULL;
    }
    nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
    if (!nacl) {
        *ret = -EINVAL;
        return NULL;
    }
    lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
    if (!lacl) {
        pr_err("Unable to allocate memory for struct se_lun_acl.\n");
        *ret = -ENOMEM;
        return NULL;
    }

    INIT_LIST_HEAD(&lacl->lacl_list);
    lacl->mapped_lun = mapped_lun;
    lacl->se_lun_nacl = nacl;
    snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

    return lacl;
}

int core_dev_add_initiator_node_lun_acl(
    struct se_portal_group *tpg,
    struct se_lun_acl *lacl,
    u32 unpacked_lun,
    u32 lun_access)
{
    struct se_lun *lun;
    struct se_node_acl *nacl;

    lun = core_dev_get_lun(tpg, unpacked_lun);
    if (!lun) {
        pr_err("%s Logical Unit Number: %u is not active on"
            " Target Portal Group: %hu, ignoring request.\n",
            tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
            tpg->se_tpg_tfo->tpg_get_tag(tpg));
        return -EINVAL;
    }

    nacl = lacl->se_lun_nacl;
    if (!nacl)
        return -EINVAL;

    if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
        (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
        lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

    lacl->se_lun = lun;

    if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
            lun_access, nacl, tpg, 1) < 0)
        return -EINVAL;

    spin_lock(&lun->lun_acl_lock);
    list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
    atomic_inc(&lun->lun_acl_count);
    smp_mb__after_atomic_inc();
    spin_unlock(&lun->lun_acl_lock);

    pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
        " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
        tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
        (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
        lacl->initiatorname);
    /*
     * Check to see if there are any existing persistent reservation APTPL
     * pre-registrations that need to be enabled for this LUN ACL..
     */
    core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);

    return 0;
}

/* core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
    struct se_portal_group *tpg,
    struct se_lun *lun,
    struct se_lun_acl *lacl)
{
    struct se_node_acl *nacl;

    nacl = lacl->se_lun_nacl;
    if (!nacl)
        return -EINVAL;

    spin_lock(&lun->lun_acl_lock);
    list_del(&lacl->lacl_list);
    atomic_dec(&lun->lun_acl_count);
    smp_mb__after_atomic_dec();
    spin_unlock(&lun->lun_acl_lock);

    core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
        TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

    lacl->se_lun = NULL;

    pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
        " InitiatorNode: %s Mapped LUN: %u\n",
        tpg->se_tpg_tfo->get_fabric_name(),
        tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
        lacl->initiatorname, lacl->mapped_lun);

    return 0;
}

void core_dev_free_initiator_node_lun_acl(
    struct se_portal_group *tpg,
    struct se_lun_acl *lacl)
{
    pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
        " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
        tpg->se_tpg_tfo->tpg_get_tag(tpg),
        tpg->se_tpg_tfo->get_fabric_name(),
        lacl->initiatorname, lacl->mapped_lun);

    kfree(lacl);
}

int core_dev_setup_virtual_lun0(void)
{
    struct se_hba *hba;
    struct se_device *dev;
    struct se_subsystem_dev *se_dev = NULL;
    struct se_subsystem_api *t;
    char buf[16];
    int ret;

    hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
    if (IS_ERR(hba))
        return PTR_ERR(hba);

    lun0_hba = hba;
    t = hba->transport;

    se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
    if (!se_dev) {
        pr_err("Unable to allocate memory for"
            " struct se_subsystem_dev\n");
        ret = -ENOMEM;
        goto out;
    }
    INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
    spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
    INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
    INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
    spin_lock_init(&se_dev->t10_pr.registration_lock);
    spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
    INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
    spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
    spin_lock_init(&se_dev->se_dev_lock);
    se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
    se_dev->t10_wwn.t10_sub_dev = se_dev;
    se_dev->t10_alua.t10_sub_dev = se_dev;
    se_dev->se_dev_attrib.da_sub_dev = se_dev;
    se_dev->se_dev_hba = hba;

    se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
    if (!se_dev->se_dev_su_ptr) {
        pr_err("Unable to locate subsystem dependent pointer"
            " from allocate_virtdevice()\n");
        ret = -ENOMEM;
        goto out;
    }
    lun0_su_dev = se_dev;

    memset(buf, 0, 16);
    sprintf(buf, "rd_pages=8");
    t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));

    dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
    if (IS_ERR(dev)) {
        ret = PTR_ERR(dev);
        goto out;
    }
    se_dev->se_dev_ptr = dev;
    g_lun0_dev = dev;

    return 0;
out:
    lun0_su_dev = NULL;
    kfree(se_dev);
    if (lun0_hba) {
        core_delete_hba(lun0_hba);
        lun0_hba = NULL;
    }
    return ret;
}

void core_dev_release_virtual_lun0(void)
{
    struct se_hba *hba = lun0_hba;
    struct se_subsystem_dev *su_dev = lun0_su_dev;

    if (!hba)
        return;

    if (g_lun0_dev)
        se_free_virtual_device(g_lun0_dev, hba);

    kfree(su_dev);
    core_delete_hba(hba);
}