/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);

static struct se_hba *lun0_hba;
static struct se_subsystem_dev *lun0_su_dev;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;
		deve->total_bytes += se_cmd->data_length;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return -EACCES;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		deve->deve_cmds++;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return -ENODEV;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			return -EACCES;
		}

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	/* TODO: get rid of this and use atomics for stats */
	dev = se_lun->lun_se_dev;
	spin_lock_irqsave(&dev->stats_lock, flags);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irqrestore(&dev->stats_lock, flags);

	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
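
/*
 * A minimal usage sketch (hypothetical fabric-driver code, not part of this
 * file): after setting up a struct se_cmd for an incoming I/O, a fabric
 * module resolves the LUN before submitting the command:
 *
 *	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0)
 *		return;		// sense reason already set above
 *
 * -ENODEV corresponds to TCM_NON_EXISTENT_LUN and -EACCES to
 * TCM_WRITE_PROTECTED in se_cmd->scsi_sense_reason, so the fabric can
 * simply complete the command with CHECK CONDITION status.
 */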

int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}
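
/*
 * Note for callers (a sketch of the implied contract, not enforced here):
 * the pr_ref_count taken above pins the returned entry against teardown,
 * since core_disable_device_list_for_node() below busy-waits until the
 * count drops back to zero. A caller is therefore expected to release the
 * reference when done, e.g.:
 *
 *	atomic_dec(&deve->pr_ref_count);
 *	smp_mb__after_atomic_dec();
 */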

int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
	nacl->device_list = NULL;

	return 0;
}

void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	unsigned long flags;

	spin_lock_irqsave(&se_nacl->device_list_lock, flags);
	deve = se_nacl->device_list[se_cmd->orig_fe_lun];
	deve->deve_cmds--;
	spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
}

void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* core_enable_device_list_for_node():
 *
 *
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	/*
	 * Check if the call is handling a demo mode -> explicit LUN ACL
	 * transition.  This transition must be for the same struct se_lun
	 * + mapped_lun that was set up in demo mode.
	 */
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (deve->se_lun_acl != NULL) {
			pr_err("struct se_dev_entry->se_lun_acl"
				" already set for demo mode -> explicit"
				" LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		if (deve->se_lun != lun) {
			pr_err("struct se_dev_entry->se_lun does not"
				" match passed struct se_lun for demo mode"
				" -> explicit LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		deve->se_lun_acl = lun_acl;

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		spin_unlock_irq(&nacl->device_list_lock);
		return 0;
	}

	deve->se_lun = lun;
	deve->se_lun_acl = lun_acl;
	deve->mapped_lun = mapped_lun;
	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}

	deve->creation_time = get_jiffies_64();
	deve->attach_count++;
	spin_unlock_irq(&nacl->device_list_lock);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}

/* core_disable_device_list_for_node():
 *
 *
 */
int core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = nacl->device_list[mapped_lun];

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below, as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we still remove deve->alua_port_list from
	 * port->sep_alua_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below.
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&deve->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Wait for any in-process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();

	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}

/* core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_disable_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}

static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device.
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code			Description
	 * 0h			Reserved
	 * 1h			Relative port 1, historically known as port A
	 * 2h			Relative port 2, historically known as port B
	 * 3h to FFFFh		Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap.
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}
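
/*
 * Illustration of the RTPI wrap behaviour above (informal, assuming
 * dev_rpti_counter is a 16-bit counter): after 0xffff has been handed out,
 * the post-increment wraps to 0x0000, which the !port->sep_rtpi check skips
 * because 0h is reserved per SPC-4; the scan over dev_sep_list then ensures
 * an identifier still in use is never handed out a second time.
 */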

static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			su_dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	if (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;
	se_dev_start(dev);

	atomic_inc(&dev->dev_export_obj.obj_access_count);
	core_export_port(dev, tpg, port, lun);
	return 0;
}

void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	atomic_dec(&dev->dev_export_obj.obj_access_count);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	se_dev_stop(dev);
	lun->lun_se_dev = NULL;
}

int target_report_luns(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned char *buf;
	u32 lun_count = 0, offset = 8, i;

	if (se_cmd->data_length < 16) {
		pr_warn("REPORT LUNS allocation length %u too small\n",
			se_cmd->data_length);
		se_cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}

	buf = transport_kmap_data_sg(se_cmd);
	if (!buf)
		return -ENOMEM;

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!se_sess) {
		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = se_sess->se_node_acl->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if ((offset + 8) > se_cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
		offset += 8;
	}
	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);

	transport_kunmap_data_sg(se_cmd);

	target_complete_cmd(se_cmd, GOOD);
	return 0;
}
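
/*
 * Worked example of the REPORT LUNS encoding above (informal): with three
 * mapped LUNs, lun_count * 8 = 24 (0x18), so the 4-byte big-endian LUN LIST
 * LENGTH header becomes 00 00 00 18; bytes 4 through 7 of the header are
 * reserved, and the 8-byte LUN entries fill the buffer starting at offset 8.
 * An initiator that passed only a 16-byte allocation length would receive
 * just the first entry, but the header still advertises the full 24 bytes
 * so it can retry with a larger buffer, per the SPC rules cited above.
 */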

/* se_release_device_for_hba():
 *
 *
 */
void se_release_device_for_hba(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
		se_dev_stop(dev);

	if (dev->dev_ptr) {
		destroy_workqueue(dev->tmr_wq);
		if (dev->transport->free_device)
			dev->transport->free_device(dev->dev_ptr);
	}

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	kfree(dev);
}

void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
}

/* se_free_virtual_device():
 *
 *	Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
	if (!list_empty(&dev->dev_sep_list))
		dump_stack();

	core_alua_free_lu_gp_mem(dev);
	se_release_device_for_hba(dev);

	return 0;
}

static void se_dev_start(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_inc(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
			dev->dev_status &=
				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

static void se_dev_stop(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_dec(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

int se_dev_check_online(struct se_device *dev)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->dev_status_lock, flags);
	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
	spin_unlock_irqrestore(&dev->dev_status_lock, flags);

	return ret;
}

int se_dev_check_shutdown(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;

	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
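
/*
 * Worked example for the alignment above (informal, assuming a 4 KiB
 * PAGE_SIZE): with block_size = 512, alignment = max(1, 4096 / 512) = 8,
 * so a backend-reported max_sectors of 1023 rounds down to 1016, the
 * largest multiple of 8 sectors that still covers whole pages. With
 * block_size >= PAGE_SIZE the max(1ul, ...) clamps alignment to 1 and
 * the value passes through unchanged.
 */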

void se_dev_set_default_attribs(
	struct se_device *dev,
	struct se_dev_limits *dev_limits)
{
	struct queue_limits *limits = &dev_limits->limits;

	dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
	dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	/*
	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
	 * iblock_create_virtdevice() from struct queue_limits values
	 * if blk_queue_discard()==1
	 */
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	/*
	 * block_size is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
	dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors,
		limits->logical_block_size);
	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
	/*
	 * Set fabric_max_sectors, which is reported in block limits
	 * VPD page (B0h).
	 */
	dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
	/*
	 * Set optimal_sectors from fabric_max_sectors, which can be
	 * lowered via configfs.
	 */
	dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
	/*
	 * queue_depth is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
	dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
}

int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
			dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
	return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("dpo_emulated not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_fua_write not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("fua read emulation not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_write_cache not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
	return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
			" exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
			dev, flag);
	return 0;
}

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
			if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
			dev, queue_depth);
	return 0;
}

int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" fabric_max_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!fabric_max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" fabric_max_sectors\n", dev);
		return -EINVAL;
	}
	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, fabric_max_sectors,
				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, fabric_max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
						      dev->se_sub_dev->se_dev_attrib.block_size);

	dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
			dev, fabric_max_sectors);
	return 0;
}

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
				" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than fabric_max_sectors: %u\n", dev,
			optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);
	return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device, use Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);
	return 0;
}

struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	int rc;

	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
		pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
			atomic_read(&dev->dev_access_obj.obj_access_count));
		return ERR_PTR(-EACCES);
	}

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if (IS_ERR(lun_p))
		return lun_p;

	rc = core_tpg_post_addlun(tpg, lun_p,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return ERR_PTR(rc);

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun_p;
}
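
/*
 * For context (a sketch of the usual trigger, not code in this file):
 * core_dev_add_lun() is reached when userspace creates a LUN through the
 * target configfs hierarchy and links a backstore device into it, roughly:
 *
 *	mkdir /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/lun/lun_0
 *	ln -s /sys/kernel/config/target/core/$HBA/$DEV \
 *	      /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/lun/lun_0/$LINK
 *
 * The $FABRIC, $WWN, $HBA, $DEV, and $LINK names above are placeholders;
 * exact paths depend on the fabric module in use.
 */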

/* core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return PTR_ERR(lun);

	core_tpg_post_dellun(tpg, lun);

	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}

struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/* core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	u32 mapped_lun,
	char *initiatorname,
	int *ret)
{
	struct se_lun_acl *lacl;
	struct se_node_acl *nacl;

	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!nacl) {
		*ret = -EINVAL;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL.
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}

/* core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	struct se_subsystem_dev *se_dev = NULL;
	struct se_subsystem_api *t;
	char buf[16];
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	lun0_hba = hba;
	t = hba->transport;

	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
	if (!se_dev) {
		pr_err("Unable to allocate memory for"
				" struct se_subsystem_dev\n");
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&se_dev->t10_pr.registration_lock);
	spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
	spin_lock_init(&se_dev->se_dev_lock);
	se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
	se_dev->t10_wwn.t10_sub_dev = se_dev;
	se_dev->t10_alua.t10_sub_dev = se_dev;
	se_dev->se_dev_attrib.da_sub_dev = se_dev;
	se_dev->se_dev_hba = hba;

	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
	if (!se_dev->se_dev_su_ptr) {
		pr_err("Unable to locate subsystem dependent pointer"
			" from allocate_virtdevice()\n");
		ret = -ENOMEM;
		goto out;
	}
	lun0_su_dev = se_dev;

	memset(buf, 0, 16);
	sprintf(buf, "rd_pages=8");
	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));

	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	se_dev->se_dev_ptr = dev;
	g_lun0_dev = dev;

	return 0;
out:
	lun0_su_dev = NULL;
	kfree(se_dev);
	if (lun0_hba) {
		core_delete_hba(lun0_hba);
		lun0_hba = NULL;
	}
	return ret;
}
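
/*
 * Sizing note for the rd_mcp parameters above (informal): "rd_pages=8"
 * gives the internal ramdisk backing virtual LUN 0 eight pages, i.e.
 * 32 KiB assuming a 4 KiB PAGE_SIZE. That is enough to answer discovery
 * commands such as REPORT_LUNS and INQUIRY, since this LUN is exported
 * write-protected by transport_lookup_cmd_lun() above.
 */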

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;
	struct se_subsystem_dev *su_dev = lun0_su_dev;

	if (!hba)
		return;

	if (g_lun0_dev)
		se_free_virtual_device(g_lun0_dev, hba);

	kfree(su_dev);
	core_delete_hba(hba);
}