target_core_tpg.c

/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_hba.h"
#include "target_core_stat.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/* core_clear_initiator_node_from_tpg():
 *
 * Release all mapped LUN access for an initiator node ACL, removing the
 * matching struct se_lun_acl from each LUN's ACL list.
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_lun_acl *acl, *acl_tmp, *found;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

		/*
		 * Track the match explicitly; after a full traversal the
		 * list_for_each_entry_safe() cursor does not point at a
		 * valid entry, so it must not be tested or deleted directly.
		 */
		found = NULL;
		spin_lock(&lun->lun_acl_lock);
		list_for_each_entry_safe(acl, acl_tmp,
					&lun->lun_acl_list, lacl_list) {
			if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
			    (acl->mapped_lun == deve->mapped_lun)) {
				found = acl;
				break;
			}
		}

		if (!found) {
			pr_err("Unable to locate struct se_lun_acl for %s,"
				" mapped_lun: %u\n", nacl->initiatorname,
				deve->mapped_lun);
			spin_unlock(&lun->lun_acl_lock);
			spin_lock_irq(&nacl->device_list_lock);
			continue;
		}

		list_del(&found->lacl_list);
		spin_unlock(&lun->lun_acl_lock);

		spin_lock_irq(&nacl->device_list_lock);
		kfree(found);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* __core_tpg_get_initiator_node_acl():
 *
 * The caller must hold tpg->acl_node_lock when calling; all callers in
 * this file take it via spin_lock_irq().
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}
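
/*
 * Call pattern sketch: callers in this file bracket the unlocked lookup
 * with the node lock, e.g.
 *
 *	spin_lock_irq(&tpg->acl_node_lock);
 *	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 *	...
 *	spin_unlock_irq(&tpg->acl_node_lock);
 */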

/* core_tpg_get_initiator_node_acl():
 *
 * Locked lookup that returns only explicitly configured (non-dynamic)
 * node ACLs.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname) &&
		    !acl->dynamic_node_acl) {
			spin_unlock_irq(&tpg->acl_node_lock);
			return acl;
		}
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	return NULL;
}

/* core_tpg_add_node_to_devs():
 *
 * Export all active TPG LUNs to a (demo-mode) node ACL, applying the
 * fabric's demo-mode write-protect policy.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, i.e. READ-ONLY.
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			if (dev->dev_flags & DF_READ_ONLY)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}

/* core_set_queue_depth_for_node():
 *
 * Ensure a node ACL has a sane (non-zero) queue depth.
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

/* core_create_device_list_for_node():
 *
 * Allocate and initialize the per-node array of struct se_dev_entry.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}

/* core_tpg_check_initiator_node_acl()
 *
 * Look up an existing node ACL, or create a dynamic (demo-mode) one if
 * the fabric allows it.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
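
/*
 * Usage sketch (hypothetical $FABRIC_MOD login path, not part of this
 * file): the fabric resolves the node ACL while building a session and
 * rejects the login when neither an explicit ACL nor demo mode applies.
 *
 *	se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
 *			initiatorname);
 *	if (!se_sess->se_node_acl) {
 *		transport_free_session(se_sess);
 *		return -EACCES;
 *	}
 */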

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	/*
	 * Busy-wait until all outstanding PR references to this node ACL
	 * have been dropped.
	 */
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i, ret;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		ret = core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/* core_tpg_add_initiator_node_acl():
 *
 * Add an explicitly configured node ACL, replacing a matching dynamic
 * ACL if one already exists.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl,
			 * because this function will return a pointer to the
			 * existing demo mode node ACL instead.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
						se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;
	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
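
/*
 * Usage sketch (hypothetical fabric configfs code): a fabric's
 * ->fabric_make_nodeacl() registers the se_node_acl embedded in its own
 * ACL container; "nexus_depth" stands for a fabric-chosen default queue
 * depth. Note the error paths above already release the fabric ACL via
 * ->tpg_release_fabric_acl(), so the caller must not free it again.
 *
 *	se_nacl = core_tpg_add_initiator_node_acl(se_tpg,
 *			&fabric_nacl->se_node_acl, name, nexus_depth);
 *	if (IS_ERR(se_nacl))
 *		return se_nacl;
 */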

/* core_tpg_del_initiator_node_acl():
 *
 * Remove a node ACL from a TPG, shutting down any active sessions that
 * still reference it.
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	struct se_session *sess, *sess_tmp;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry_safe(sess, sess_tmp,
				&tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		spin_unlock_bh(&tpg->session_lock);
		/*
		 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
		 * forcefully shutdown the $FABRIC_MOD session/nexus.
		 */
		tpg->se_tpg_tfo->close_session(sess);

		spin_lock_bh(&tpg->session_lock);
	}
	spin_unlock_bh(&tpg->session_lock);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
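
/*
 * Teardown sketch (hypothetical fabric configfs code): the matching
 * ->fabric_drop_nodeacl() removes the core state first; this function
 * never frees the ACL itself, so the fabric container is still owned by
 * the caller afterwards.
 *
 *	core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
 *	kfree(fabric_nacl);
 */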

/* core_tpg_set_initiator_node_queue_depth():
 *
 * Change the queue depth for a node ACL, optionally forcing session
 * reinstatement when an active session exists.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational.  To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(),
				initiatorname);
			spin_unlock_bh(&tpg->session_lock);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to validate the requested queue
	 * depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_bh(&tpg->session_lock);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&tpg->session_lock);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
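
/*
 * Usage sketch (hypothetical configfs attribute store): a fabric can
 * expose the queue depth as a per-ACL attribute, passing force=1 to
 * permit the change while a session is live at the cost of session
 * reinstatement.
 *
 *	ret = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *			nacl->initiatorname, new_depth, 1);
 *	if (ret < 0)
 *		return ret;
 */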

static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}

int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			/*
			 * se_tpg is owned (and typically embedded) by the
			 * fabric caller, so release only what was allocated
			 * here.
			 */
			kfree(se_tpg->tpg_lun_list);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
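
/*
 * Usage sketch (hypothetical ->fabric_make_tpg(), simplified): the
 * se_portal_group is embedded in a fabric TPG structure and registered
 * against the fabric's target_core_fabric_ops; "fabric_tf_ops" and
 * "fabric_tpg" are placeholder names.
 *
 *	ret = core_tpg_register(&fabric_tf_ops, wwn, &fabric_tpg->se_tpg,
 *			fabric_tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 */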

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		se_tpg->se_tpg_tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	kfree(se_tpg->tpg_lun_list);

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
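
/*
 * Teardown sketch (hypothetical ->fabric_drop_tpg()): deregister the
 * embedded se_portal_group before releasing the fabric's own TPG
 * structure.
 *
 *	core_tpg_deregister(&fabric_tpg->se_tpg);
 *	kfree(fabric_tpg);
 */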

struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0)
		return ret;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
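
/*
 * Pairing sketch: core_tpg_pre_addlun() validates and claims a free LUN
 * slot, and core_tpg_post_addlun() exports the backing device and marks
 * the LUN active. A caller along the lines of core_dev_add_lun() would
 * do, roughly:
 *
 *	lun = core_tpg_pre_addlun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return lun;
 *
 *	if (core_tpg_post_addlun(tpg, lun, lun_access, dev) < 0)
 *		return ERR_PTR(-EINVAL);
 */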

static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun,
	int *ret)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
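
/*
 * Pairing sketch: the delete side mirrors the add side, with
 * core_tpg_pre_dellun() resolving the active LUN and
 * core_tpg_post_dellun() shutting it down and freeing the slot:
 *
 *	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 *
 *	core_tpg_post_dellun(tpg, lun);
 */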