/*******************************************************************************
 * Filename: target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_hba.h"
/* core_clear_initiator_node_from_tpg():
 *
 * Release every mapped LUN and struct se_lun_acl still referencing the given
 * node ACL, called while that ACL is being removed from its TPG.
 */
static void core_clear_initiator_node_from_tpg(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        int i;
        struct se_dev_entry *deve;
        struct se_lun *lun;
        struct se_lun_acl *acl, *acl_tmp;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = &nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                if (!deve->se_lun) {
                        printk(KERN_ERR "%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                TPG_TFO(tpg)->get_fabric_name());
                        continue;
                }

                lun = deve->se_lun;
                spin_unlock_irq(&nacl->device_list_lock);
                core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
                        TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

                spin_lock(&lun->lun_acl_lock);
                list_for_each_entry_safe(acl, acl_tmp,
                                &lun->lun_acl_list, lacl_list) {
                        if (!(strcmp(acl->initiatorname,
                                        nacl->initiatorname)) &&
                            (acl->mapped_lun == deve->mapped_lun))
                                break;
                }

                if (!acl) {
                        printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
                                " mapped_lun: %u\n", nacl->initiatorname,
                                deve->mapped_lun);
                        spin_unlock(&lun->lun_acl_lock);
                        spin_lock_irq(&nacl->device_list_lock);
                        continue;
                }

                list_del(&acl->lacl_list);
                spin_unlock(&lun->lun_acl_lock);

                spin_lock_irq(&nacl->device_list_lock);
                kfree(acl);
        }
        spin_unlock_irq(&nacl->device_list_lock);
}
/* __core_tpg_get_initiator_node_acl():
 *
 * spin_lock_bh(&tpg->acl_node_lock); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!(strcmp(acl->initiatorname, initiatorname)))
                        return acl;
        }

        return NULL;
}
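
/*
 * Usage sketch: the double-underscore lookup above does no locking of its
 * own, so a caller is expected to wrap it in tpg->acl_node_lock, roughly:
 *
 *        spin_lock_bh(&tpg->acl_node_lock);
 *        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 *        if (acl) {
 *                ... inspect or update the ACL while the lock is held ...
 *        }
 *        spin_unlock_bh(&tpg->acl_node_lock);
 *
 * core_tpg_add_initiator_node_acl() and
 * core_tpg_set_initiator_node_queue_depth() below follow this pattern.
 */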
/* core_tpg_get_initiator_node_acl():
 *
 * Look up an explicitly configured (non-dynamic) node ACL by initiator name,
 * taking tpg->acl_node_lock internally.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        spin_lock_bh(&tpg->acl_node_lock);
        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!(strcmp(acl->initiatorname, initiatorname)) &&
                    (!(acl->dynamic_node_acl))) {
                        spin_unlock_bh(&tpg->acl_node_lock);
                        return acl;
                }
        }
        spin_unlock_bh(&tpg->acl_node_lock);

        return NULL;
}
/* core_tpg_add_node_to_devs():
 *
 * Map every active LUN in the TPG into a newly created demo mode node ACL,
 * using the fabric's demo mode write protect setting to decide between
 * READ-ONLY and READ-WRITE access.
 */
void core_tpg_add_node_to_devs(
        struct se_node_acl *acl,
        struct se_portal_group *tpg)
{
        int i = 0;
        u32 lun_access = 0;
        struct se_lun *lun;
        struct se_device *dev;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = &tpg->tpg_lun_list[i];
                if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);

                dev = lun->lun_se_dev;
                /*
                 * By default in LIO-Target $FABRIC_MOD,
                 * demo_mode_write_protect is ON, or READ_ONLY;
                 */
                if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
                        if (dev->dev_flags & DF_READ_ONLY)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        /*
                         * Allow only optical drives to issue R/W in default RO
                         * demo mode.
                         */
                        if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                }

                printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
                        " access for LUN in Demo Mode\n",
                        TPG_TFO(tpg)->get_fabric_name(),
                        TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
                        (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
                        "READ-WRITE" : "READ-ONLY");

                core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
                                lun_access, acl, tpg, 1);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}
/* core_set_queue_depth_for_node():
 *
 * Ensure a node ACL always advertises a usable (non-zero) queue depth.
 */
static int core_set_queue_depth_for_node(
        struct se_portal_group *tpg,
        struct se_node_acl *acl)
{
        if (!acl->queue_depth) {
                printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
                        " defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
                        acl->initiatorname);
                acl->queue_depth = 1;
        }

        return 0;
}
/* core_create_device_list_for_node():
 *
 * Allocate and initialize the per-node array of struct se_dev_entry used to
 * track mapped LUN access for this node ACL.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;
        int i;

        nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
                                TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
        if (!(nacl->device_list)) {
                printk(KERN_ERR "Unable to allocate memory for"
                        " struct se_node_acl->device_list\n");
                return -1;
        }
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = &nacl->device_list[i];

                atomic_set(&deve->ua_count, 0);
                atomic_set(&deve->pr_ref_count, 0);
                spin_lock_init(&deve->ua_lock);
                INIT_LIST_HEAD(&deve->alua_port_list);
                INIT_LIST_HEAD(&deve->ua_list);
        }

        return 0;
}
/* core_tpg_check_initiator_node_acl()
 *
 * Return an existing node ACL for initiatorname, or, when the fabric allows
 * demo mode, allocate and register a dynamic node ACL on the fly.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if ((acl))
                return acl;

        if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
                return NULL;

        acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
        if (!(acl))
                return NULL;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
        atomic_set(&acl->mib_ref_count, 0);
        acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
        spin_lock_init(&acl->stats_lock);
        acl->dynamic_node_acl = 1;

        TPG_TFO(tpg)->set_default_node_attributes(acl);

        if (core_create_device_list_for_node(acl) < 0) {
                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
                return NULL;
        }

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                core_free_device_list_for_node(acl, tpg);
                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
                return NULL;
        }

        core_tpg_add_node_to_devs(acl, tpg);

        spin_lock_bh(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_bh(&tpg->acl_node_lock);

        printk(KERN_INFO "%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
                TPG_TFO(tpg)->get_fabric_name(), initiatorname);

        return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
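
/*
 * Usage sketch (not taken from a specific fabric driver; the function and
 * variable names in this fragment are illustrative only): a fabric module
 * would typically call core_tpg_check_initiator_node_acl() from its login or
 * session creation path, roughly:
 *
 *        static int example_fabric_login(struct se_portal_group *se_tpg,
 *                struct se_session *se_sess, unsigned char *initiatorname)
 *        {
 *                struct se_node_acl *acl;
 *
 *                acl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
 *                if (!acl)
 *                        return -EACCES;
 *
 *                se_sess->se_node_acl = acl;
 *                return 0;
 *        }
 *
 * A NULL return means no explicit ACL exists and the fabric does not allow
 * demo mode, so the login attempt should be rejected.
 */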
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
        while (atomic_read(&nacl->acl_pr_ref_count) != 0)
                cpu_relax();
}

void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
{
        while (atomic_read(&nacl->mib_ref_count) != 0)
                cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
        int i, ret;
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = &tpg->tpg_lun_list[i];

                if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
                    (lun->lun_se_dev == NULL))
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);
                ret = core_dev_del_lun(tpg, lun->unpacked_lun);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);
/* core_tpg_add_initiator_node_acl():
 *
 * Register an explicitly configured node ACL for a TPG, replacing an existing
 * dynamic (demo mode) ACL of the same name if one is present.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *se_nacl,
        const char *initiatorname,
        u32 queue_depth)
{
        struct se_node_acl *acl = NULL;

        spin_lock_bh(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if ((acl)) {
                if (acl->dynamic_node_acl) {
                        acl->dynamic_node_acl = 0;
                        printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
                                " for %s\n", TPG_TFO(tpg)->get_fabric_name(),
                                TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
                        spin_unlock_bh(&tpg->acl_node_lock);
                        /*
                         * Release the locally allocated struct se_node_acl
                         * because core_tpg_add_initiator_node_acl() returned
                         * a pointer to an existing demo mode node ACL.
                         */
                        if (se_nacl)
                                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
                                                se_nacl);
                        goto done;
                }

                printk(KERN_ERR "ACL entry for %s Initiator"
                        " Node %s already exists for TPG %u, ignoring"
                        " request.\n", TPG_TFO(tpg)->get_fabric_name(),
                        initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock_bh(&tpg->acl_node_lock);
                return ERR_PTR(-EEXIST);
        }
        spin_unlock_bh(&tpg->acl_node_lock);

        if (!(se_nacl)) {
                printk(KERN_ERR "struct se_node_acl pointer is NULL\n");
                return ERR_PTR(-EINVAL);
        }
        /*
         * For v4.x logic the se_node_acl_s is hanging off a fabric
         * dependent structure allocated via
         * struct target_core_fabric_ops->fabric_make_nodeacl()
         */
        acl = se_nacl;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
        acl->queue_depth = queue_depth;
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
        spin_lock_init(&acl->stats_lock);

        TPG_TFO(tpg)->set_default_node_attributes(acl);

        if (core_create_device_list_for_node(acl) < 0) {
                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
                return ERR_PTR(-ENOMEM);
        }

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                core_free_device_list_for_node(acl, tpg);
                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
                return ERR_PTR(-EINVAL);
        }

        spin_lock_bh(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_bh(&tpg->acl_node_lock);

done:
        printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
                TPG_TFO(tpg)->get_fabric_name(), initiatorname);

        return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
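
/*
 * Usage sketch (all names below are illustrative assumptions): per the v4.x
 * comment in the function above, the struct se_node_acl registered here is
 * normally embedded in a fabric private structure allocated from the
 * fabric's ->fabric_make_nodeacl() configfs callback, roughly:
 *
 *        struct example_nacl *nacl = kzalloc(sizeof(*nacl), GFP_KERNEL);
 *        struct se_node_acl *se_nacl;
 *
 *        if (!nacl)
 *                return ERR_PTR(-ENOMEM);
 *
 *        se_nacl = core_tpg_add_initiator_node_acl(se_tpg,
 *                        &nacl->se_node_acl, name, queue_depth);
 *        if (IS_ERR(se_nacl))
 *                return se_nacl;
 *
 *        ... fabric specific setup on nacl ...
 *        return se_nacl;
 */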
/* core_tpg_del_initiator_node_acl():
 *
 * Remove a node ACL from a TPG, shutting down any active sessions that still
 * reference it and releasing its mapped LUN state.
 */
int core_tpg_del_initiator_node_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *acl,
        int force)
{
        struct se_session *sess, *sess_tmp;
        int dynamic_acl = 0;

        spin_lock_bh(&tpg->acl_node_lock);
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
                dynamic_acl = 1;
        }
        list_del(&acl->acl_list);
        tpg->num_node_acls--;
        spin_unlock_bh(&tpg->acl_node_lock);

        spin_lock_bh(&tpg->session_lock);
        list_for_each_entry_safe(sess, sess_tmp,
                        &tpg->tpg_sess_list, sess_list) {
                if (sess->se_node_acl != acl)
                        continue;
                /*
                 * Determine if the session needs to be closed by our context.
                 */
                if (!(TPG_TFO(tpg)->shutdown_session(sess)))
                        continue;

                spin_unlock_bh(&tpg->session_lock);
                /*
                 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
                 * forcefully shutdown the $FABRIC_MOD session/nexus.
                 */
                TPG_TFO(tpg)->close_session(sess);

                spin_lock_bh(&tpg->session_lock);
        }
        spin_unlock_bh(&tpg->session_lock);

        core_tpg_wait_for_nacl_pr_ref(acl);
        core_tpg_wait_for_mib_ref(acl);
        core_clear_initiator_node_from_tpg(acl, tpg);
        core_free_device_list_for_node(acl, tpg);

        printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
                TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);

        return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
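
/*
 * Usage sketch (illustrative; names are assumptions): the fabric's
 * ->fabric_drop_nodeacl() configfs callback is the usual caller, e.g.:
 *
 *        core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
 *        kfree(nacl);
 *
 * where nacl is the fabric private wrapper embedding se_nacl. Any sessions
 * still bound to the ACL are shut down through the fabric's
 * shutdown_session()/close_session() callbacks before the ACL's mapped LUN
 * state is released.
 */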
/* core_tpg_set_initiator_node_queue_depth():
 *
 * Change the TCQ queue depth for an existing node ACL, forcing session
 * reinstatement when an active session exists and force=1 is passed.
 */
int core_tpg_set_initiator_node_queue_depth(
        struct se_portal_group *tpg,
        unsigned char *initiatorname,
        u32 queue_depth,
        int force)
{
        struct se_session *sess, *init_sess = NULL;
        struct se_node_acl *acl;
        int dynamic_acl = 0;

        spin_lock_bh(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (!(acl)) {
                printk(KERN_ERR "Access Control List entry for %s Initiator"
                        " Node %s does not exist for TPG %hu, ignoring"
                        " request.\n", TPG_TFO(tpg)->get_fabric_name(),
                        initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock_bh(&tpg->acl_node_lock);
                return -ENODEV;
        }
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
                dynamic_acl = 1;
        }
        spin_unlock_bh(&tpg->acl_node_lock);

        spin_lock_bh(&tpg->session_lock);
        list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
                if (sess->se_node_acl != acl)
                        continue;

                if (!force) {
                        printk(KERN_ERR "Unable to change queue depth for %s"
                                " Initiator Node: %s while session is"
                                " operational. To forcefully change the queue"
                                " depth and force session reinstatement"
                                " use the \"force=1\" parameter.\n",
                                TPG_TFO(tpg)->get_fabric_name(), initiatorname);
                        spin_unlock_bh(&tpg->session_lock);

                        spin_lock_bh(&tpg->acl_node_lock);
                        if (dynamic_acl)
                                acl->dynamic_node_acl = 1;
                        spin_unlock_bh(&tpg->acl_node_lock);
                        return -EEXIST;
                }
                /*
                 * Determine if the session needs to be closed by our context.
                 */
                if (!(TPG_TFO(tpg)->shutdown_session(sess)))
                        continue;

                init_sess = sess;
                break;
        }

        /*
         * User has requested to change the queue depth for an Initiator Node.
         * Change the value in the Node's struct se_node_acl, and call
         * core_set_queue_depth_for_node() to set the requested queue depth.
         *
         * Finally call TPG_TFO(tpg)->close_session() to force session
         * reinstatement to occur if there is an active session for the
         * $FABRIC_MOD Initiator Node in question.
         */
        acl->queue_depth = queue_depth;

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                spin_unlock_bh(&tpg->session_lock);
                /*
                 * Force session reinstatement if
                 * core_set_queue_depth_for_node() failed, because we assume
                 * the $FABRIC_MOD has already set the session reinstatement
                 * bit from TPG_TFO(tpg)->shutdown_session() called above.
                 */
                if (init_sess)
                        TPG_TFO(tpg)->close_session(init_sess);

                spin_lock_bh(&tpg->acl_node_lock);
                if (dynamic_acl)
                        acl->dynamic_node_acl = 1;
                spin_unlock_bh(&tpg->acl_node_lock);
                return -EINVAL;
        }
        spin_unlock_bh(&tpg->session_lock);
        /*
         * If the $FABRIC_MOD session for the Initiator Node ACL exists,
         * forcefully shutdown the $FABRIC_MOD session/nexus.
         */
        if (init_sess)
                TPG_TFO(tpg)->close_session(init_sess);

        printk(KERN_INFO "Successfully changed queue depth to: %d for Initiator"
                " Node: %s on %s Target Portal Group: %u\n", queue_depth,
                initiatorname, TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg));

        spin_lock_bh(&tpg->acl_node_lock);
        if (dynamic_acl)
                acl->dynamic_node_acl = 1;
        spin_unlock_bh(&tpg->acl_node_lock);

        return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
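
/*
 * Usage sketch (hypothetical, not from a specific fabric module): a fabric's
 * configfs attribute store handler for a per-ACL queue depth would typically
 * parse the user value and hand it to the exported helper above. The page,
 * count, se_tpg and acl variables below are the usual store handler context
 * and are assumptions for illustration:
 *
 *        unsigned long val;
 *        int ret;
 *
 *        if (strict_strtoul(page, 0, &val) < 0)
 *                return -EINVAL;
 *
 *        ret = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *                        acl->initiatorname, (u32)val, 1);
 *        return (ret < 0) ? ret : count;
 *
 * Passing force=1 allows the change while the initiator is logged in, at the
 * cost of session reinstatement as described in the function above.
 */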
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
        /* Set in core_dev_setup_virtual_lun0() */
        struct se_device *dev = se_global->g_lun0_dev;
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;
        u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
        int ret;

        lun->unpacked_lun = 0;
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&lun->lun_acl_list);
        INIT_LIST_HEAD(&lun->lun_cmd_list);
        spin_lock_init(&lun->lun_acl_lock);
        spin_lock_init(&lun->lun_cmd_lock);
        spin_lock_init(&lun->lun_sep_lock);

        ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
        if (ret < 0)
                return -1;

        return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;

        core_tpg_post_dellun(se_tpg, lun);
}

int core_tpg_register(
        struct target_core_fabric_ops *tfo,
        struct se_wwn *se_wwn,
        struct se_portal_group *se_tpg,
        void *tpg_fabric_ptr,
        int se_tpg_type)
{
        struct se_lun *lun;
        u32 i;

        se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
                                TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
        if (!(se_tpg->tpg_lun_list)) {
                printk(KERN_ERR "Unable to allocate struct se_portal_group->"
                                "tpg_lun_list\n");
                return -ENOMEM;
        }

        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = &se_tpg->tpg_lun_list[i];
                lun->unpacked_lun = i;
                lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
                atomic_set(&lun->lun_acl_count, 0);
                init_completion(&lun->lun_shutdown_comp);
                INIT_LIST_HEAD(&lun->lun_acl_list);
                INIT_LIST_HEAD(&lun->lun_cmd_list);
                spin_lock_init(&lun->lun_acl_lock);
                spin_lock_init(&lun->lun_cmd_lock);
                spin_lock_init(&lun->lun_sep_lock);
        }

        se_tpg->se_tpg_type = se_tpg_type;
        se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
        se_tpg->se_tpg_tfo = tfo;
        se_tpg->se_tpg_wwn = se_wwn;
        atomic_set(&se_tpg->tpg_pr_ref_count, 0);
        INIT_LIST_HEAD(&se_tpg->acl_node_list);
        INIT_LIST_HEAD(&se_tpg->se_tpg_list);
        INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
        spin_lock_init(&se_tpg->acl_node_lock);
        spin_lock_init(&se_tpg->session_lock);
        spin_lock_init(&se_tpg->tpg_lun_lock);

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
                if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
                        kfree(se_tpg);
                        return -ENOMEM;
                }
        }

        spin_lock_bh(&se_global->se_tpg_lock);
        list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
        spin_unlock_bh(&se_global->se_tpg_lock);

        printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
                " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
                "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_register);
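
/*
 * Usage sketch (illustrative only; the wrapper struct and variable names are
 * assumptions): a fabric's configfs ->fabric_make_tpg() callback would
 * normally allocate its own TPG wrapper and then register the embedded
 * struct se_portal_group:
 *
 *        ret = core_tpg_register(&example_fabric_ops, wwn, &tpg->se_tpg,
 *                        (void *)tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *        if (ret < 0) {
 *                kfree(tpg);
 *                return ERR_PTR(ret);
 *        }
 *
 * core_tpg_deregister() below is the matching teardown call made when the
 * fabric drops the TPG.
 */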
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
        printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
                " for endpoint: %s Portal Tag %u\n",
                TPG_TFO(se_tpg)->get_fabric_name(),
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery",
                TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
                TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));

        spin_lock_bh(&se_global->se_tpg_lock);
        list_del(&se_tpg->se_tpg_list);
        spin_unlock_bh(&se_global->se_tpg_lock);

        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
                core_tpg_release_virtual_lun0(se_tpg);

        se_tpg->se_tpg_fabric_ptr = NULL;
        kfree(se_tpg->tpg_lun_list);

        return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
struct se_lun *core_tpg_pre_addlun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        TPG_TFO(tpg)->get_fabric_name(),
                        unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun = &tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
                printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
                        " on %s Target Portal Group: %u, ignoring request.\n",
                        unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return ERR_PTR(-EINVAL);
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

int core_tpg_post_addlun(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        u32 lun_access,
        void *lun_ptr)
{
        if (core_dev_export(lun_ptr, tpg, lun) < 0)
                return -1;

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_access = lun_access;
        lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
        spin_unlock(&tpg->tpg_lun_lock);

        return 0;
}
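
/*
 * Usage sketch (illustrative; the device-layer code that adds a LUN to a TPG
 * would use this pair roughly as follows): adding a LUN is a two step
 * sequence around the fabric-independent device export:
 *
 *        lun = core_tpg_pre_addlun(tpg, unpacked_lun);
 *        if (IS_ERR(lun))
 *                return lun;
 *
 *        ... any additional setup for the new LUN ...
 *
 *        if (core_tpg_post_addlun(tpg, lun, lun_access, dev) < 0)
 *                return ERR_PTR(-EINVAL);
 *
 * core_tpg_setup_virtual_lun0() above shows the post_addlun() half being used
 * directly for the virtual LUN 0.
 */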
static void core_tpg_shutdown_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        core_clear_lun_from_tpg(lun, tpg);
        transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
        struct se_portal_group *tpg,
        u32 unpacked_lun,
        int *ret)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
                        TRANSPORT_MAX_LUNS_PER_TPG-1,
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun = &tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
                printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %u, ignoring request.\n",
                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return ERR_PTR(-ENODEV);
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

int core_tpg_post_dellun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        core_tpg_shutdown_lun(tpg, lun);

        core_dev_unexport(lun->lun_se_dev, tpg, lun);

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        spin_unlock(&tpg->tpg_lun_lock);

        return 0;
}
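
/*
 * Usage sketch for the delete path (illustrative only; the counterpart of the
 * addlun sketch above, mirroring what a LUN removal caller would do):
 *
 *        int ret = 0;
 *        struct se_lun *lun;
 *
 *        lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
 *        if (IS_ERR(lun))
 *                return PTR_ERR(lun);
 *
 *        core_tpg_post_dellun(tpg, lun);
 *
 * pre_dellun() only validates and looks up the LUN; post_dellun() clears the
 * LUN from active sessions, unexports the device, and marks the LUN free
 * again.
 */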