/*******************************************************************************
 * Filename: target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);
/* core_clear_initiator_node_from_tpg():
 *
 * Release every MappedLUN and its struct se_lun_acl that the given
 * initiator node ACL holds within this Target Portal Group.
 */
static void core_clear_initiator_node_from_tpg(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        int i;
        struct se_dev_entry *deve;
        struct se_lun *lun;
        struct se_lun_acl *acl, *acl_tmp;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                if (!deve->se_lun) {
                        pr_err("%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }

                lun = deve->se_lun;
                spin_unlock_irq(&nacl->device_list_lock);
                core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
                        TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

                spin_lock(&lun->lun_acl_lock);
                list_for_each_entry_safe(acl, acl_tmp,
                                        &lun->lun_acl_list, lacl_list) {
                        if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
                            (acl->mapped_lun == deve->mapped_lun))
                                break;
                }

                if (!acl) {
                        pr_err("Unable to locate struct se_lun_acl for %s,"
                                " mapped_lun: %u\n", nacl->initiatorname,
                                deve->mapped_lun);
                        spin_unlock(&lun->lun_acl_lock);
                        spin_lock_irq(&nacl->device_list_lock);
                        continue;
                }

                list_del(&acl->lacl_list);
                spin_unlock(&lun->lun_acl_lock);

                spin_lock_irq(&nacl->device_list_lock);
                kfree(acl);
        }
        spin_unlock_irq(&nacl->device_list_lock);
}
/* __core_tpg_get_initiator_node_acl():
 *
 * tpg->acl_node_lock must be held when calling.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname))
                        return acl;
        }

        return NULL;
}
/* core_tpg_get_initiator_node_acl():
 *
 * Look up an explicit (non-dynamic) node ACL by initiator name.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        spin_lock_irq(&tpg->acl_node_lock);
        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname) &&
                    !acl->dynamic_node_acl) {
                        spin_unlock_irq(&tpg->acl_node_lock);
                        return acl;
                }
        }
        spin_unlock_irq(&tpg->acl_node_lock);

        return NULL;
}
/* core_tpg_add_node_to_devs():
 *
 * Grant the node ACL demo mode access to every active LUN in the TPG.
 */
void core_tpg_add_node_to_devs(
        struct se_node_acl *acl,
        struct se_portal_group *tpg)
{
        int i = 0;
        u32 lun_access = 0;
        struct se_lun *lun;
        struct se_device *dev;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = tpg->tpg_lun_list[i];
                if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);

                dev = lun->lun_se_dev;
                /*
                 * By default in LIO-Target $FABRIC_MOD,
                 * demo_mode_write_protect is ON, or READ_ONLY;
                 */
                if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
                        if (dev->dev_flags & DF_READ_ONLY)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        /*
                         * Allow only optical drives to issue R/W in default RO
                         * demo mode.
                         */
                        if (dev->transport->get_device_type(dev) == TYPE_DISK)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                }

                pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
                        " access for LUN in Demo Mode\n",
                        tpg->se_tpg_tfo->get_fabric_name(),
                        tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                        (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
                        "READ-WRITE" : "READ-ONLY");

                core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
                                lun_access, acl, tpg, 1);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}
/* core_set_queue_depth_for_node():
 *
 * Ensure the node ACL carries a non-zero queue depth.
 */
static int core_set_queue_depth_for_node(
        struct se_portal_group *tpg,
        struct se_node_acl *acl)
{
        if (!acl->queue_depth) {
                pr_err("Queue depth for %s Initiator Node: %s is 0,"
                        " defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        acl->initiatorname);
                acl->queue_depth = 1;
        }

        return 0;
}
void array_free(void *array, int n)
{
        void **a = array;
        int i;

        for (i = 0; i < n; i++)
                kfree(a[i]);
        kfree(a);
}

static void *array_zalloc(int n, size_t size, gfp_t flags)
{
        void **a;
        int i;

        a = kzalloc(n * sizeof(void *), flags);
        if (!a)
                return NULL;
        for (i = 0; i < n; i++) {
                a[i] = kzalloc(size, flags);
                if (!a[i]) {
                        array_free(a, n);
                        return NULL;
                }
        }
        return a;
}
/* core_create_device_list_for_node():
 *
 * Allocate and initialize the node ACL's per-LUN device list.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;
        int i;

        nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
                        sizeof(struct se_dev_entry), GFP_KERNEL);
        if (!nacl->device_list) {
                pr_err("Unable to allocate memory for"
                        " struct se_node_acl->device_list\n");
                return -ENOMEM;
        }
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = nacl->device_list[i];

                atomic_set(&deve->ua_count, 0);
                atomic_set(&deve->pr_ref_count, 0);
                spin_lock_init(&deve->ua_lock);
                INIT_LIST_HEAD(&deve->alua_port_list);
                INIT_LIST_HEAD(&deve->ua_list);
        }

        return 0;
}
/* core_tpg_check_initiator_node_acl()
 *
 * Return an existing node ACL for initiatorname, or create a dynamic
 * (demo mode) ACL if the fabric allows it.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl)
                return acl;

        if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
                return NULL;

        acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
        if (!acl)
                return NULL;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        kref_init(&acl->acl_kref);
        init_completion(&acl->acl_free_comp);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
        acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
        spin_lock_init(&acl->stats_lock);
        acl->dynamic_node_acl = 1;

        tpg->se_tpg_tfo->set_default_node_attributes(acl);

        if (core_create_device_list_for_node(acl) < 0) {
                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
                return NULL;
        }

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                core_free_device_list_for_node(acl, tpg);
                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
                return NULL;
        }
        /*
         * Here we only create demo-mode MappedLUNs from the active
         * TPG LUNs if the fabric is not explicitly asking for
         * tpg_check_demo_mode_login_only() == 1.
         */
        if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) &&
            (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1))
                do { ; } while (0);
        else
                core_tpg_add_node_to_devs(acl, tpg);

        spin_lock_irq(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_irq(&tpg->acl_node_lock);

        pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

        return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
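/*
 * Example (illustrative sketch; se_tpg and initiatorname stand in for the
 * fabric's own variables): a fabric module would typically resolve or create
 * the ACL from its login/session setup path along these lines:
 *
 *      se_nacl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
 *      if (!se_nacl)
 *              return -EACCES;
 *
 * A NULL return means no explicit ACL exists and tpg_check_demo_mode() is
 * disabled for this fabric.
 */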
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
        while (atomic_read(&nacl->acl_pr_ref_count) != 0)
                cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
        int i;
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = tpg->tpg_lun_list[i];

                if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
                    (lun->lun_se_dev == NULL))
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);
                core_dev_del_lun(tpg, lun->unpacked_lun);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);
/* core_tpg_add_initiator_node_acl():
 *
 * Add an explicit node ACL, converting an existing dynamic (demo mode)
 * ACL in place if one is already present for initiatorname.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *se_nacl,
        const char *initiatorname,
        u32 queue_depth)
{
        struct se_node_acl *acl = NULL;

        spin_lock_irq(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl) {
                if (acl->dynamic_node_acl) {
                        acl->dynamic_node_acl = 0;
                        pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
                                " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                                tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
                        spin_unlock_irq(&tpg->acl_node_lock);
                        /*
                         * Release the locally allocated struct se_node_acl
                         * because core_tpg_add_initiator_node_acl() returned
                         * a pointer to an existing demo mode node ACL.
                         */
                        if (se_nacl)
                                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
                                        se_nacl);
                        goto done;
                }

                pr_err("ACL entry for %s Initiator"
                        " Node %s already exists for TPG %u, ignoring"
                        " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock_irq(&tpg->acl_node_lock);
                return ERR_PTR(-EEXIST);
        }
        spin_unlock_irq(&tpg->acl_node_lock);

        if (!se_nacl) {
                pr_err("struct se_node_acl pointer is NULL\n");
                return ERR_PTR(-EINVAL);
        }
        /*
         * For v4.x logic the se_node_acl_s is hanging off a fabric
         * dependent structure allocated via
         * struct target_core_fabric_ops->fabric_make_nodeacl()
         */
        acl = se_nacl;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        kref_init(&acl->acl_kref);
        init_completion(&acl->acl_free_comp);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
        acl->queue_depth = queue_depth;
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
        spin_lock_init(&acl->stats_lock);

        tpg->se_tpg_tfo->set_default_node_attributes(acl);

        if (core_create_device_list_for_node(acl) < 0) {
                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
                return ERR_PTR(-ENOMEM);
        }

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                core_free_device_list_for_node(acl, tpg);
                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
                return ERR_PTR(-EINVAL);
        }

        spin_lock_irq(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_irq(&tpg->acl_node_lock);

done:
        pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

        return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
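/*
 * Example (illustrative sketch; fabric_nacl, name and queue_depth are
 * placeholders): a fabric's configfs ->fabric_make_nodeacl() callback
 * embeds a struct se_node_acl in its own ACL structure and hands it to
 * this helper, propagating any ERR_PTR() on failure:
 *
 *      se_nacl = core_tpg_add_initiator_node_acl(se_tpg,
 *                      &fabric_nacl->se_node_acl, name, queue_depth);
 *      if (IS_ERR(se_nacl)) {
 *              kfree(fabric_nacl);
 *              return se_nacl;
 *      }
 */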
/* core_tpg_del_initiator_node_acl():
 *
 * Shut down any remaining sessions for the node ACL and release its
 * MappedLUN and device list resources.
 */
int core_tpg_del_initiator_node_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *acl,
        int force)
{
        LIST_HEAD(sess_list);
        struct se_session *sess, *sess_tmp;
        unsigned long flags;
        int rc;

        spin_lock_irq(&tpg->acl_node_lock);
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
        }
        list_del(&acl->acl_list);
        tpg->num_node_acls--;
        spin_unlock_irq(&tpg->acl_node_lock);

        spin_lock_irqsave(&acl->nacl_sess_lock, flags);
        acl->acl_stop = 1;

        list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
                                sess_acl_list) {
                if (sess->sess_tearing_down != 0)
                        continue;

                target_get_session(sess);
                list_move(&sess->sess_acl_list, &sess_list);
        }
        spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

        list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
                list_del(&sess->sess_acl_list);

                rc = tpg->se_tpg_tfo->shutdown_session(sess);
                target_put_session(sess);
                if (!rc)
                        continue;
                target_put_session(sess);
        }
        target_put_nacl(acl);
        /*
         * Wait for last target_put_nacl() to complete in target_complete_nacl()
         * for active fabric session transport_deregister_session() callbacks.
         */
        wait_for_completion(&acl->acl_free_comp);

        core_tpg_wait_for_nacl_pr_ref(acl);
        core_clear_initiator_node_from_tpg(acl, tpg);
        core_free_device_list_for_node(acl, tpg);

        pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

        return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
/* core_tpg_set_initiator_node_queue_depth():
 *
 * Change the queue depth for an existing node ACL, optionally forcing
 * session reinstatement for any active session.
 */
int core_tpg_set_initiator_node_queue_depth(
        struct se_portal_group *tpg,
        unsigned char *initiatorname,
        u32 queue_depth,
        int force)
{
        struct se_session *sess, *init_sess = NULL;
        struct se_node_acl *acl;
        unsigned long flags;
        int dynamic_acl = 0;

        spin_lock_irq(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (!acl) {
                pr_err("Access Control List entry for %s Initiator"
                        " Node %s does not exist for TPG %hu, ignoring"
                        " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock_irq(&tpg->acl_node_lock);
                return -ENODEV;
        }
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
                dynamic_acl = 1;
        }
        spin_unlock_irq(&tpg->acl_node_lock);

        spin_lock_irqsave(&tpg->session_lock, flags);
        list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
                if (sess->se_node_acl != acl)
                        continue;

                if (!force) {
                        pr_err("Unable to change queue depth for %s"
                                " Initiator Node: %s while session is"
                                " operational. To forcefully change the queue"
                                " depth and force session reinstatement"
                                " use the \"force=1\" parameter.\n",
                                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
                        spin_unlock_irqrestore(&tpg->session_lock, flags);

                        spin_lock_irq(&tpg->acl_node_lock);
                        if (dynamic_acl)
                                acl->dynamic_node_acl = 1;
                        spin_unlock_irq(&tpg->acl_node_lock);
                        return -EEXIST;
                }
                /*
                 * Determine if the session needs to be closed by our context.
                 */
                if (!tpg->se_tpg_tfo->shutdown_session(sess))
                        continue;

                init_sess = sess;
                break;
        }

        /*
         * User has requested to change the queue depth for an Initiator Node.
         * Change the value in the Node's struct se_node_acl, and call
         * core_set_queue_depth_for_node() to add the requested queue depth.
         *
         * Finally call tpg->se_tpg_tfo->close_session() to force session
         * reinstatement to occur if there is an active session for the
         * $FABRIC_MOD Initiator Node in question.
         */
        acl->queue_depth = queue_depth;

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                spin_unlock_irqrestore(&tpg->session_lock, flags);
                /*
                 * Force session reinstatement if
                 * core_set_queue_depth_for_node() failed, because we assume
                 * the $FABRIC_MOD has already set the session reinstatement
                 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
                 */
                if (init_sess)
                        tpg->se_tpg_tfo->close_session(init_sess);

                spin_lock_irq(&tpg->acl_node_lock);
                if (dynamic_acl)
                        acl->dynamic_node_acl = 1;
                spin_unlock_irq(&tpg->acl_node_lock);
                return -EINVAL;
        }
        spin_unlock_irqrestore(&tpg->session_lock, flags);
        /*
         * If the $FABRIC_MOD session for the Initiator Node ACL exists,
         * forcefully shutdown the $FABRIC_MOD session/nexus.
         */
        if (init_sess)
                tpg->se_tpg_tfo->close_session(init_sess);

        pr_debug("Successfully changed queue depth to: %d for Initiator"
                " Node: %s on %s Target Portal Group: %u\n", queue_depth,
                initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg));

        spin_lock_irq(&tpg->acl_node_lock);
        if (dynamic_acl)
                acl->dynamic_node_acl = 1;
        spin_unlock_irq(&tpg->acl_node_lock);

        return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
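/*
 * Example (illustrative sketch; acl and new_depth are placeholders): a
 * fabric configfs attribute store handler might apply a new depth and
 * request forced session reinstatement roughly as follows:
 *
 *      ret = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *                      acl->initiatorname, new_depth, 1);
 *      if (ret < 0)
 *              return ret;
 */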
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
        /* Set in core_dev_setup_virtual_lun0() */
        struct se_device *dev = g_lun0_dev;
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;
        u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
        int ret;

        lun->unpacked_lun = 0;
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&lun->lun_acl_list);
        INIT_LIST_HEAD(&lun->lun_cmd_list);
        spin_lock_init(&lun->lun_acl_lock);
        spin_lock_init(&lun->lun_cmd_lock);
        spin_lock_init(&lun->lun_sep_lock);

        ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
        if (ret < 0)
                return ret;

        return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;

        core_tpg_post_dellun(se_tpg, lun);
}
int core_tpg_register(
        struct target_core_fabric_ops *tfo,
        struct se_wwn *se_wwn,
        struct se_portal_group *se_tpg,
        void *tpg_fabric_ptr,
        int se_tpg_type)
{
        struct se_lun *lun;
        u32 i;

        se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
                        sizeof(struct se_lun), GFP_KERNEL);
        if (!se_tpg->tpg_lun_list) {
                pr_err("Unable to allocate struct se_portal_group->"
                        "tpg_lun_list\n");
                return -ENOMEM;
        }

        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = se_tpg->tpg_lun_list[i];
                lun->unpacked_lun = i;
                lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
                atomic_set(&lun->lun_acl_count, 0);
                init_completion(&lun->lun_shutdown_comp);
                INIT_LIST_HEAD(&lun->lun_acl_list);
                INIT_LIST_HEAD(&lun->lun_cmd_list);
                spin_lock_init(&lun->lun_acl_lock);
                spin_lock_init(&lun->lun_cmd_lock);
                spin_lock_init(&lun->lun_sep_lock);
        }

        se_tpg->se_tpg_type = se_tpg_type;
        se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
        se_tpg->se_tpg_tfo = tfo;
        se_tpg->se_tpg_wwn = se_wwn;
        atomic_set(&se_tpg->tpg_pr_ref_count, 0);
        INIT_LIST_HEAD(&se_tpg->acl_node_list);
        INIT_LIST_HEAD(&se_tpg->se_tpg_node);
        INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
        spin_lock_init(&se_tpg->acl_node_lock);
        spin_lock_init(&se_tpg->session_lock);
        spin_lock_init(&se_tpg->tpg_lun_lock);

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
                if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
                        /* Free the tpg_lun_list allocated above */
                        array_free(se_tpg->tpg_lun_list,
                                        TRANSPORT_MAX_LUNS_PER_TPG);
                        kfree(se_tpg);
                        return -ENOMEM;
                }
        }

        spin_lock_bh(&tpg_lock);
        list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
        spin_unlock_bh(&tpg_lock);

        pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
                " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
                "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_register);
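/*
 * Example (illustrative sketch; my_fabric_ops, wwn and tpg are placeholders):
 * a fabric's ->fabric_make_tpg() callback would normally register the newly
 * allocated TPG as:
 *
 *      ret = core_tpg_register(&my_fabric_ops, wwn, &tpg->se_tpg,
 *                      tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *      if (ret < 0)
 *              return ERR_PTR(ret);
 */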
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
        struct se_node_acl *nacl, *nacl_tmp;

        pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
                " for endpoint: %s Portal Tag %u\n",
                se_tpg->se_tpg_tfo->get_fabric_name(),
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery",
                se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
                se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

        spin_lock_bh(&tpg_lock);
        list_del(&se_tpg->se_tpg_node);
        spin_unlock_bh(&tpg_lock);

        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();
        /*
         * Release any remaining demo-mode generated se_node_acl that have
         * not been released because of TFO->tpg_check_demo_mode_cache() == 1
         * in transport_deregister_session().
         */
        spin_lock_irq(&se_tpg->acl_node_lock);
        list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
                        acl_list) {
                list_del(&nacl->acl_list);
                se_tpg->num_node_acls--;
                spin_unlock_irq(&se_tpg->acl_node_lock);

                core_tpg_wait_for_nacl_pr_ref(nacl);
                core_free_device_list_for_node(nacl, se_tpg);
                se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

                spin_lock_irq(&se_tpg->acl_node_lock);
        }
        spin_unlock_irq(&se_tpg->acl_node_lock);

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
                core_tpg_release_virtual_lun0(se_tpg);

        se_tpg->se_tpg_fabric_ptr = NULL;
        array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
        return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
struct se_lun *core_tpg_pre_addlun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        tpg->se_tpg_tfo->get_fabric_name(),
                        unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun = tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
                pr_err("TPG Logical Unit Number: %u is already active"
                        " on %s Target Portal Group: %u, ignoring request.\n",
                        unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return ERR_PTR(-EINVAL);
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

int core_tpg_post_addlun(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        u32 lun_access,
        void *lun_ptr)
{
        int ret;

        ret = core_dev_export(lun_ptr, tpg, lun);
        if (ret < 0)
                return ret;

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_access = lun_access;
        lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
        spin_unlock(&tpg->tpg_lun_lock);

        return 0;
}
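/*
 * Example (illustrative sketch; unpacked_lun, lun_access and dev are
 * placeholders): callers such as the configfs LUN creation path are expected
 * to pair the pre/post helpers, claiming the LUN slot first and activating
 * it only after the backend device export succeeds:
 *
 *      lun = core_tpg_pre_addlun(tpg, unpacked_lun);
 *      if (IS_ERR(lun))
 *              return lun;
 *
 *      ret = core_tpg_post_addlun(tpg, lun, lun_access, dev);
 *      if (ret < 0)
 *              return ERR_PTR(ret);
 */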
static void core_tpg_shutdown_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        core_clear_lun_from_tpg(lun, tpg);
        transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun = tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
                pr_err("%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %u, ignoring request.\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return ERR_PTR(-ENODEV);
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

int core_tpg_post_dellun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        core_tpg_shutdown_lun(tpg, lun);

        core_dev_unexport(lun->lun_se_dev, tpg, lun);

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        spin_unlock(&tpg->tpg_lun_lock);

        return 0;
}