/*******************************************************************************
 * Filename: target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);

static struct se_hba *lun0_hba;
static struct se_subsystem_dev *lun0_su_dev;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;
		deve->total_bytes += se_cmd->data_length;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return -EACCES;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		deve->deve_cmds++;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return -ENODEV;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			return -EACCES;
		}

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	/* TODO: get rid of this and use atomics for stats */
	dev = se_lun->lun_se_dev;
	spin_lock_irqsave(&dev->stats_lock, flags);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irqrestore(&dev->stats_lock, flags);

	/*
	 * Add the iscsi_cmd_t to the struct se_lun's cmd list. This list is used
	 * for tracking state of struct se_cmds during LUN shutdown events.
	 */
	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
	atomic_set(&se_cmd->transport_lun_active, 1);
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
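
/*
 * Illustrative sketch (not part of the original file): how a fabric module
 * might resolve a LUN with transport_lookup_cmd_lun() before queueing I/O.
 * The callback name and the example_send_check_condition()/example_queue_cmd()
 * helpers are hypothetical; only transport_lookup_cmd_lun() itself is real.
 * On failure the lookup has already set se_cmd->scsi_sense_reason and
 * SCF_SCSI_CDB_EXCEPTION, so the fabric only has to report the exception.
 */
#if 0	/* illustrative only */
static int example_fabric_queue_command(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	/* Fills out se_cmd->se_lun, se_cmd->se_dev and per-deve statistics */
	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0)
		return example_send_check_condition(se_cmd);

	return example_queue_cmd(se_cmd);
}
#endif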

int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries port pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}

int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	kfree(nacl->device_list);
	nacl->device_list = NULL;

	return 0;
}

void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&se_nacl->device_list_lock);
	deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
	deve->deve_cmds--;
	spin_unlock_irq(&se_nacl->device_list_lock);
}

void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = &nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* core_update_device_list_for_node():
 *
 *
 */
int core_update_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg,
	int enable)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
	int trans = 0;
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 */
	if (!enable) {
		/*
		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
		 * that have not been explicitly converted to MappedLUNs ->
		 * struct se_lun_acl, but we remove deve->alua_port_list from
		 * port->sep_alua_list. This also means that active UAs and
		 * NodeACL context specific PR metadata for demo-mode
		 * MappedLUN *deve will be released below..
		 */
		spin_lock_bh(&port->sep_alua_lock);
		list_del(&deve->alua_port_list);
		spin_unlock_bh(&port->sep_alua_lock);
	}

	spin_lock_irq(&nacl->device_list_lock);
	if (enable) {
		/*
		 * Check if the call is handling demo mode -> explicit LUN ACL
		 * transition. This transition must be for the same struct se_lun
		 * + mapped_lun that was setup in demo mode..
		 */
		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
			if (deve->se_lun_acl != NULL) {
				pr_err("struct se_dev_entry->se_lun_acl"
					" already set for demo mode -> explicit"
					" LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -EINVAL;
			}
			if (deve->se_lun != lun) {
				pr_err("struct se_dev_entry->se_lun does"
					" not match passed struct se_lun for demo mode"
					" -> explicit LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -EINVAL;
			}
			deve->se_lun_acl = lun_acl;
			trans = 1;
		} else {
			deve->se_lun = lun;
			deve->se_lun_acl = lun_acl;
			deve->mapped_lun = mapped_lun;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
		}

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		if (trans) {
			spin_unlock_irq(&nacl->device_list_lock);
			return 0;
		}
		deve->creation_time = get_jiffies_64();
		deve->attach_count++;
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&port->sep_alua_lock);
		list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
		spin_unlock_bh(&port->sep_alua_lock);

		return 0;
	}
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	spin_unlock_irq(&nacl->device_list_lock);
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();
	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);

	return 0;
}

/* core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_bh(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_bh(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = &nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_update_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg, 0);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&tpg->acl_node_lock);
	}
	spin_unlock_bh(&tpg->acl_node_lock);
}

static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code		Description
	 * 0h		Reserved
	 * 1h		Relative port 1, historically known as port A
	 * 2h		Relative port 2, historically known as port B
	 * 3h to FFFFh	Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}
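
/*
 * The allocator above is a wrap-safe "next free 16-bit ID" scheme: bump a
 * counter, skip the reserved value 0h, and re-scan the active port list so
 * identifiers stay unique after the counter wraps. A minimal userspace model
 * of the same idea (a hypothetical standalone sketch, not kernel code; it
 * uses pre-increment where the kernel uses post-increment):
 */
#if 0	/* illustrative only */
#include <stddef.h>
#include <stdint.h>

static uint16_t rtpi_counter;

/* Return the next nonzero 16-bit ID not already present in ids[0..n). */
static uint16_t alloc_rtpi_example(const uint16_t *ids, size_t n)
{
	uint16_t id;
	size_t i;
again:
	id = ++rtpi_counter;
	if (!id)			/* 0h is reserved by SPC-4 */
		goto again;
	for (i = 0; i < n; i++)
		if (ids[i] == id)	/* duplicate after 16-bit wrap */
			goto again;
	return id;
}
#endif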

static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
				"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			su_dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	while (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;
	se_dev_start(dev);

	atomic_inc(&dev->dev_export_obj.obj_access_count);
	core_export_port(dev, tpg, port, lun);
	return 0;
}

void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	atomic_dec(&dev->dev_export_obj.obj_access_count);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	se_dev_stop(dev);
	lun->lun_se_dev = NULL;
}

int transport_core_report_lun_response(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_task *se_task;
	unsigned char *buf;
	u32 cdb_offset = 0, lun_count = 0, offset = 8, i;

	/* The command must carry at least one struct se_task */
	if (list_empty(&se_cmd->t_task_list)) {
		pr_err("Unable to locate struct se_task for struct se_cmd\n");
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}
	se_task = list_first_entry(&se_cmd->t_task_list,
				struct se_task, t_list);

	buf = transport_kmap_first_data_page(se_cmd);

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD. In that case, report LUN=0 only.
	 */
	if (!se_sess) {
		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &se_sess->se_node_acl->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		se_lun = deve->se_lun;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if ((cdb_offset + 8) >= se_cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
		offset += 8;
		cdb_offset += 8;
	}
	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	/* Write the LUN LIST LENGTH header before unmapping the page */
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);
	transport_kunmap_first_data_page(se_cmd);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
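
/*
 * The four stores above are an open-coded big-endian 32-bit write of the
 * REPORT LUNS header's LUN LIST LENGTH field: the length in bytes of the LUN
 * list (8 * number of LUNs), not counting the 8-byte header itself. With
 * three mapped LUNs, lun_count * 8 == 24, so the response begins
 * 00 00 00 18. An equivalent sketch using the kernel's unaligned big-endian
 * helper (the function name here is hypothetical):
 */
#if 0	/* illustrative only */
#include <asm/unaligned.h>

static void example_set_lun_list_length(unsigned char *buf, u32 nr_luns)
{
	put_unaligned_be32(nr_luns * 8, buf);	/* bytes 0-3, big-endian */
}
#endif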

/* se_release_device_for_hba():
 *
 *
 */
void se_release_device_for_hba(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
		se_dev_stop(dev);

	if (dev->dev_ptr) {
		kthread_stop(dev->process_thread);
		if (dev->transport->free_device)
			dev->transport->free_device(dev->dev_ptr);
	}

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	kfree(dev);
}

void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
}

/* se_free_virtual_device():
 *
 *	Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
	if (!list_empty(&dev->dev_sep_list))
		dump_stack();

	core_alua_free_lu_gp_mem(dev);
	se_release_device_for_hba(dev);

	return 0;
}

static void se_dev_start(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_inc(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
			dev->dev_status &=
				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

static void se_dev_stop(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_dec(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

int se_dev_check_online(struct se_device *dev)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->dev_status_lock, flags);
	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
	spin_unlock_irqrestore(&dev->dev_status_lock, flags);

	return ret;
}

int se_dev_check_shutdown(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}

u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 tmp, aligned_max_sectors;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
	aligned_max_sectors = (tmp / block_size);
	if (max_sectors != aligned_max_sectors) {
		printk(KERN_INFO "Rounding down aligned max_sectors from %u"
			" to %u\n", max_sectors, aligned_max_sectors);
		return aligned_max_sectors;
	}
	return max_sectors;
}
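
/*
 * Worked example of the rounding above: with block_size = 512 and
 * PAGE_SIZE = 4096, max_sectors = 1023 gives 1023 * 512 = 523776 bytes,
 * rounded down to 520192 (the nearest PAGE_SIZE multiple), i.e.
 * 520192 / 512 = 1016 sectors. A max_sectors whose byte count is already
 * PAGE_SIZE aligned (e.g. 1024 sectors of 512 bytes) passes through
 * unchanged.
 */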

void se_dev_set_default_attribs(
	struct se_device *dev,
	struct se_dev_limits *dev_limits)
{
	struct queue_limits *limits = &dev_limits->limits;

	dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
	dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	/*
	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
	 * iblock_create_virtdevice() from struct queue_limits values
	 * if blk_queue_discard()==1
	 */
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	/*
	 * block_size is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
	dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
	/*
	 * max_sectors is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
						limits->logical_block_size);
	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
	/*
	 * Set optimal_sectors from max_sectors, which can be lowered via
	 * configfs.
	 */
	dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
	/*
	 * queue_depth is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
	dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
}

int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
{
	if (task_timeout > DA_TASK_TIMEOUT_MAX) {
		pr_err("dev[%p]: Passed task_timeout: %u larger than"
			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
		return -EINVAL;
	} else {
		dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
		pr_debug("dev[%p]: Set SE Device task_timeout: %u\n",
			dev, task_timeout);
	}
	return 0;
}

int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
	return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->dpo_emulated == NULL) {
		pr_err("dev->transport->dpo_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->dpo_emulated(dev) == 0) {
		pr_err("dev->transport->dpo_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
	pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation"
		" bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);
	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->fua_write_emulated == NULL) {
		pr_err("dev->transport->fua_write_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->fua_write_emulated(dev) == 0) {
		pr_err("dev->transport->fua_write_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->fua_read_emulated == NULL) {
		pr_err("dev->transport->fua_read_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->fua_read_emulated(dev) == 0) {
		pr_err("dev->transport->fua_read_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);
	return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->write_cache_emulated == NULL) {
		pr_err("dev->transport->write_cache_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->write_cache_emulated(dev) == 0) {
		pr_err("dev->transport->write_cache_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
	return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
			" exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
	return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	u32 orig_queue_depth = dev->queue_depth;

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
			if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	if (queue_depth > orig_queue_depth)
		atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
	else if (queue_depth < orig_queue_depth)
		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);

	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
		dev, queue_depth);
	return 0;
}
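
/*
 * Worked example of the depth_left adjustment above: raising queue_depth
 * from 32 to 128 adds 96 to dev->depth_left, so commands already in flight
 * keep their slots; lowering it from 128 to 32 subtracts 96, which can drive
 * depth_left negative until enough outstanding commands complete and return
 * their slots.
 */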

int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
{
	int force = 0; /* Force setting for VDEVS */

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" max_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" max_sectors\n", dev);
		return -EINVAL;
	}
	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, max_sectors,
				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (!force && (max_sectors >
				dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
			pr_err("dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors"
				": %u, use force=1 to override.\n", dev,
				max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	max_sectors = se_dev_align_max_sectors(max_sectors,
				dev->se_sub_dev->se_dev_attrib.block_size);

	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
		dev, max_sectors);
	return 0;
}

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
			" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than max_sectors: %u\n", dev,
			optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
		dev, optimal_sectors);
	return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device, use for Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
		dev, block_size);
	return 0;
}

struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_hba *hba,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	u32 lun_access = 0;

	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
		pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
			atomic_read(&dev->dev_access_obj.obj_access_count));
		return NULL;
	}

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if ((IS_ERR(lun_p)) || !lun_p)
		return NULL;

	if (dev->dev_flags & DF_READ_ONLY)
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	else
		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;

	if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
		return NULL;

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_bh(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_bh(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_bh(&tpg->acl_node_lock);
			}
		}
		spin_unlock_bh(&tpg->acl_node_lock);
	}

	return lun_p;
}
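
/*
 * For context (an assumption about typical usage, not stated in this file):
 * core_dev_add_lun() is normally reached from the configfs fabric layer when
 * userspace links a backstore device into a TPG LUN, along the lines of:
 *
 *   mkdir /sys/kernel/config/target/iscsi/$IQN/tpgt_1/lun/lun_0
 *   ln -s /sys/kernel/config/target/core/iblock_0/mydev \
 *         /sys/kernel/config/target/iscsi/$IQN/tpgt_1/lun/lun_0/mydev
 *
 * The demo-mode loop above then maps the new LUN into every dynamically
 * generated node ACL via core_tpg_add_node_to_devs().
 */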

/* core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;
	int ret = 0;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
	if (!lun)
		return ret;

	core_tpg_post_dellun(tpg, lun);

	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}

struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/* core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	u32 mapped_lun,
	char *initiatorname,
	int *ret)
{
	struct se_lun_acl *lacl;
	struct se_node_acl *nacl;

	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!nacl) {
		*ret = -EINVAL;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg, 1) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);

	return 0;
}

/* core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	struct se_subsystem_dev *se_dev = NULL;
	struct se_subsystem_api *t;
	char buf[16];
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	lun0_hba = hba;
	t = hba->transport;

	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
	if (!se_dev) {
		pr_err("Unable to allocate memory for"
			" struct se_subsystem_dev\n");
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&se_dev->se_dev_node);
	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&se_dev->t10_pr.registration_lock);
	spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
	spin_lock_init(&se_dev->se_dev_lock);
	se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
	se_dev->t10_wwn.t10_sub_dev = se_dev;
	se_dev->t10_alua.t10_sub_dev = se_dev;
	se_dev->se_dev_attrib.da_sub_dev = se_dev;
	se_dev->se_dev_hba = hba;

	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
	if (!se_dev->se_dev_su_ptr) {
		pr_err("Unable to locate subsystem dependent pointer"
			" from allocate_virtdevice()\n");
		ret = -ENOMEM;
		goto out;
	}
	lun0_su_dev = se_dev;

	memset(buf, 0, 16);
	sprintf(buf, "rd_pages=8");
	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));

	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	se_dev->se_dev_ptr = dev;
	g_lun0_dev = dev;

	return 0;
out:
	lun0_su_dev = NULL;
	kfree(se_dev);
	if (lun0_hba) {
		core_delete_hba(lun0_hba);
		lun0_hba = NULL;
	}
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;
	struct se_subsystem_dev *su_dev = lun0_su_dev;

	if (!hba)
		return;

	if (g_lun0_dev)
		se_free_virtual_device(g_lun0_dev, hba);

	kfree(su_dev);
	core_delete_hba(hba);
}