target_core_alua.c

/*******************************************************************************
 * Filename: target_core_alua.c
 *
 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
 *
 * Copyright (c) 2009-2010 Rising Tide Systems
 * Copyright (c) 2009-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_ua.h"

static int core_alua_check_transition(int state, int *primary);
static int core_alua_set_tg_pt_secondary_state(
		struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
		struct se_port *port, int explict, int offline);

static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;

static DEFINE_SPINLOCK(lu_gps_lock);
static LIST_HEAD(lu_gps_list);

struct t10_alua_lu_gp *default_lu_gp;

/*
 * REPORT_TARGET_PORT_GROUPS
 *
 * See spc4r17 section 6.27
 */
int target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
	struct se_port *port;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned char *buf;
	u32 rd_len = 0, off;
	int ext_hdr = (cmd->t_task_cdb[1] & 0x20);

	/*
	 * Skip over RESERVED area to first Target port group descriptor
	 * depending on the PARAMETER DATA FORMAT type..
	 */
	if (ext_hdr != 0)
		off = 8;
	else
		off = 4;

	if (cmd->data_length < off) {
		pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
			" small for %s header\n", cmd->data_length,
			(ext_hdr) ? "extended" : "normal");
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	buf = transport_kmap_data_sg(cmd);

	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		/*
		 * Check if the Target port group and Target port descriptor list
		 * based on tg_pt_gp_members count will fit into the response payload.
		 * Otherwise, bump rd_len to let the initiator know we have exceeded
		 * the allocation length and the response is truncated.
		 */
		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
		     cmd->data_length) {
			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
			continue;
		}
		/*
		 * PREF: Preferred target port bit, determine if this
		 * bit should be set for port group.
		 */
		if (tg_pt_gp->tg_pt_gp_pref)
			buf[off] = 0x80;
		/*
		 * Set the ASYMMETRIC ACCESS State
		 */
		buf[off++] |= (atomic_read(
			&tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
		/*
		 * Set supported ASYMMETRIC ACCESS State bits
		 */
		buf[off] = 0x80; /* T_SUP */
		buf[off] |= 0x40; /* O_SUP */
		buf[off] |= 0x8; /* U_SUP */
		buf[off] |= 0x4; /* S_SUP */
		buf[off] |= 0x2; /* AN_SUP */
		buf[off++] |= 0x1; /* AO_SUP */
		/*
		 * TARGET PORT GROUP
		 */
		buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
		buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);

		off++; /* Skip over Reserved */
		/*
		 * STATUS CODE
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
		/*
		 * Vendor Specific field
		 */
		buf[off++] = 0x00;
		/*
		 * TARGET PORT COUNT
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
		rd_len += 8;

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
				tg_pt_gp_mem_list) {
			port = tg_pt_gp_mem->tg_pt;
			/*
			 * Start Target Port descriptor format
			 *
			 * See spc4r17 section 6.2.7 Table 247
			 */
			off += 2; /* Skip over Obsolete */
			/*
			 * Set RELATIVE TARGET PORT IDENTIFIER
			 */
			buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
			buf[off++] = (port->sep_rtpi & 0xff);
			rd_len += 4;
		}
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
	}
	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
	/*
	 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
	 */
	put_unaligned_be32(rd_len, &buf[0]);

	/*
	 * Fill in the Extended header parameter data format if requested
	 */
	if (ext_hdr != 0) {
		buf[4] = 0x10;
		/*
		 * Set the implicit transition time (in seconds) for the application
		 * client to use as a base for its transition timeout value.
		 *
		 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
		 * this CDB was received upon to determine this value individually
		 * for the ALUA target port group.
		 */
		port = cmd->se_lun->lun_sep;
		tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
		if (tg_pt_gp_mem) {
			spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
			tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
			if (tg_pt_gp)
				buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs;
			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		}
	}
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}
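
/*
 * For reference, a sketch of the descriptor layout built above (derived
 * from the byte-by-byte encoding in this function and spc4r17 table 245/247):
 * each 8-byte Target port group descriptor is followed by one 4-byte
 * Target port descriptor per group member.
 *
 *	Byte 0:   PREF (bit 7) | ASYMMETRIC ACCESS STATE (bits 3:0)
 *	Byte 1:   supported state bits (T_SUP .. AO_SUP)
 *	Byte 2-3: TARGET PORT GROUP identifier (big-endian)
 *	Byte 4:   Reserved
 *	Byte 5:   STATUS CODE
 *	Byte 6:   Vendor specific
 *	Byte 7:   TARGET PORT COUNT
 *	Per member: bytes 0-1 Obsolete, bytes 2-3 RELATIVE TARGET PORT
 *	IDENTIFIER (big-endian)
 */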

/*
 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 *
 * See spc4r17 section 6.35
 */
int target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
	unsigned char *buf;
	unsigned char *ptr;
	u32 len = 4; /* Skip over RESERVED area in header */
	int alua_access_state, primary = 0, rc;
	u16 tg_pt_id, rtpi;

	if (!l_port) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -EINVAL;
	}
	if (cmd->data_length < 4) {
		pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
			" small\n", cmd->data_length);
		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
		return -EINVAL;
	}
	buf = transport_kmap_data_sg(cmd);

	/*
	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
	 * for the local tg_pt_gp.
	 */
	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
	if (!l_tg_pt_gp_mem) {
		pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		rc = -EINVAL;
		goto out;
	}
	spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
	l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
	if (!l_tg_pt_gp) {
		spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		rc = -EINVAL;
		goto out;
	}
	rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
	spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);

	if (!rc) {
		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
			" while TPGS_EXPLICT_ALUA is disabled\n");
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		rc = -EINVAL;
		goto out;
	}

	ptr = &buf[4]; /* Skip over RESERVED area in header */

	while (len < cmd->data_length) {
		alua_access_state = (ptr[0] & 0x0f);
		/*
		 * Check the received ALUA access state, and determine if
		 * the state is a primary or secondary target port asymmetric
		 * access state.
		 */
		rc = core_alua_check_transition(alua_access_state, &primary);
		if (rc != 0) {
			/*
			 * If the SET TARGET PORT GROUPS attempts to establish
			 * an invalid combination of target port asymmetric
			 * access states or attempts to establish an
			 * unsupported target port asymmetric access state,
			 * then the command shall be terminated with CHECK
			 * CONDITION status, with the sense key set to ILLEGAL
			 * REQUEST, and the additional sense code set to INVALID
			 * FIELD IN PARAMETER LIST.
			 */
			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
			rc = -EINVAL;
			goto out;
		}
		rc = -1;
		/*
		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
		 * specifies a primary target port asymmetric access state,
		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
		 * a primary target port group for which the primary target
		 * port asymmetric access state shall be changed. If the
		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
		 * port asymmetric access state, then the TARGET PORT GROUP OR
		 * TARGET PORT field specifies the relative target port
		 * identifier (see 3.1.120) of the target port for which the
		 * secondary target port asymmetric access state shall be
		 * changed.
		 */
		if (primary) {
			tg_pt_id = get_unaligned_be16(ptr + 2);
			/*
			 * Locate the matching target port group ID from
			 * the global tg_pt_gp list
			 */
			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
			list_for_each_entry(tg_pt_gp,
					&su_dev->t10_alua.tg_pt_gps_list,
					tg_pt_gp_list) {
				if (!tg_pt_gp->tg_pt_gp_valid_id)
					continue;

				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
					continue;

				atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
				smp_mb__after_atomic_inc();
				spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);

				rc = core_alua_do_port_transition(tg_pt_gp,
						dev, l_port, nacl,
						alua_access_state, 1);

				spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
				atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
				smp_mb__after_atomic_dec();
				break;
			}
			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
			/*
			 * If no matching target port group ID can be located,
			 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
			 */
			if (rc != 0) {
				cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
				rc = -EINVAL;
				goto out;
			}
		} else {
			/*
			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
			 * the Target Port in question for the incoming
			 * SET_TARGET_PORT_GROUPS op.
			 */
			rtpi = get_unaligned_be16(ptr + 2);
			/*
			 * Locate the matching relative target port identifier
			 * for the struct se_device storage object.
			 */
			spin_lock(&dev->se_port_lock);
			list_for_each_entry(port, &dev->dev_sep_list,
					sep_list) {
				if (port->sep_rtpi != rtpi)
					continue;

				tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
				spin_unlock(&dev->se_port_lock);

				rc = core_alua_set_tg_pt_secondary_state(
						tg_pt_gp_mem, port, 1, 1);

				spin_lock(&dev->se_port_lock);
				break;
			}
			spin_unlock(&dev->se_port_lock);
			/*
			 * If no matching relative target port identifier can
			 * be located, throw an exception with ASCQ:
			 * INVALID_PARAMETER_LIST
			 */
			if (rc != 0) {
				cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
				rc = -EINVAL;
				goto out;
			}
		}

		ptr += 4;
		len += 4;
	}

out:
	transport_kunmap_data_sg(cmd);
	if (!rc)
		target_complete_cmd(cmd, GOOD);
	return rc;
}
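
/*
 * For reference, a sketch of the SET TARGET PORT GROUPS parameter data
 * parsed above: a 4-byte reserved header followed by one or more 4-byte
 * descriptors, consumed by the ptr += 4 / len += 4 loop.
 *
 *	Byte 0:   ASYMMETRIC ACCESS STATE (bits 3:0)
 *	Byte 1:   Reserved
 *	Byte 2-3: TARGET PORT GROUP identifier (primary state change) or
 *	          RELATIVE TARGET PORT IDENTIFIER (secondary state change),
 *	          big-endian in both cases.
 */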

static inline int core_alua_state_nonoptimized(
	struct se_cmd *cmd,
	unsigned char *cdb,
	int nonop_delay_msecs,
	u8 *alua_ascq)
{
	/*
	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
	 * later to determine if processing of this cmd needs to be
	 * temporarily delayed for the Active/NonOptimized primary access state.
	 */
	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
	cmd->alua_nonop_delay = nonop_delay_msecs;
	return 0;
}

static inline int core_alua_state_standby(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
	 * spc4r17 section 5.9.2.4.4
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case LOG_SELECT:
	case LOG_SENSE:
	case MODE_SELECT:
	case MODE_SENSE:
	case REPORT_LUNS:
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
			return 1;
		}
	case MAINTENANCE_OUT:
		switch (cdb[1]) {
		case MO_SET_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
			return 1;
		}
	case REQUEST_SENSE:
	case PERSISTENT_RESERVE_IN:
	case PERSISTENT_RESERVE_OUT:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
		return 1;
	}

	return 0;
}

static inline int core_alua_state_unavailable(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
	 * spc4r17 section 5.9.2.4.5
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
			return 1;
		}
	case MAINTENANCE_OUT:
		switch (cdb[1]) {
		case MO_SET_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
			return 1;
		}
	case REQUEST_SENSE:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
		return 1;
	}

	return 0;
}

static inline int core_alua_state_transition(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
	 * spc4r17 section 5.9.2.5
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
			return 1;
		}
	case REQUEST_SENSE:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
		return 1;
	}

	return 0;
}

/*
 * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
 * in transport_cmd_sequencer(). This function is assigned to
 * struct t10_alua *->state_check() in core_setup_alua()
 */
static int core_alua_state_check_nop(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	return 0;
}

/*
 * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
 * This function is assigned to struct t10_alua *->state_check() in
 * core_setup_alua()
 *
 * Also, this function can return three different return codes to
 * signal transport_generic_cmd_sequencer()
 *
 * return 1: Is used to signal LUN not accessible, and check condition/not ready
 * return 0: Used to signal success
 * return -1: Used to signal failure, and invalid cdb field
 */
static int core_alua_state_check(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	struct se_lun *lun = cmd->se_lun;
	struct se_port *port = lun->lun_sep;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	int out_alua_state, nonop_delay_msecs;

	if (!port)
		return 0;
	/*
	 * First, check for a struct se_port specific secondary ALUA target port
	 * access state: OFFLINE
	 */
	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
		pr_debug("ALUA: Got secondary offline status for local"
				" target port\n");
		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
		return 1;
	}
	/*
	 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
	 * ALUA target port group, to obtain current ALUA access state.
	 * Otherwise look for the underlying struct se_device association with
	 * a ALUA logical unit group.
	 */
	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	/*
	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
	 * statement so the compiler knows explicitly to check this case first.
	 * For the Optimized ALUA access state case, we want to process the
	 * incoming fabric cmd ASAP..
	 */
	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
		return 0;

	switch (out_alua_state) {
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		return core_alua_state_nonoptimized(cmd, cdb,
					nonop_delay_msecs, alua_ascq);
	case ALUA_ACCESS_STATE_STANDBY:
		return core_alua_state_standby(cmd, cdb, alua_ascq);
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		return core_alua_state_unavailable(cmd, cdb, alua_ascq);
	case ALUA_ACCESS_STATE_TRANSITION:
		return core_alua_state_transition(cmd, cdb, alua_ascq);
	/*
	 * OFFLINE is a secondary ALUA target port group access state, that is
	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
	 */
	case ALUA_ACCESS_STATE_OFFLINE:
	default:
		pr_err("Unknown ALUA access state: 0x%02x\n",
				out_alua_state);
		return -EINVAL;
	}

	return 0;
}

/*
 * Check implicit and explicit ALUA state change requests.
 */
static int core_alua_check_transition(int state, int *primary)
{
	switch (state) {
	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
	case ALUA_ACCESS_STATE_STANDBY:
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		/*
		 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
		 * defined as primary target port asymmetric access states.
		 */
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_OFFLINE:
		/*
		 * OFFLINE state is defined as a secondary target port
		 * asymmetric access state.
		 */
		*primary = 0;
		break;
	default:
		pr_err("Unknown ALUA access state: 0x%02x\n", state);
		return -EINVAL;
	}

	return 0;
}

static char *core_alua_dump_state(int state)
{
	switch (state) {
	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
		return "Active/Optimized";
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		return "Active/NonOptimized";
	case ALUA_ACCESS_STATE_STANDBY:
		return "Standby";
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		return "Unavailable";
	case ALUA_ACCESS_STATE_OFFLINE:
		return "Offline";
	default:
		return "Unknown";
	}

	return NULL;
}

char *core_alua_dump_status(int status)
{
	switch (status) {
	case ALUA_STATUS_NONE:
		return "None";
	case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
		return "Altered by Explicit STPG";
	case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
		return "Altered by Implicit ALUA";
	default:
		return "Unknown";
	}

	return NULL;
}

/*
 * Used by fabric modules to determine when we need to delay processing
 * for the Active/NonOptimized paths..
 */
int core_alua_check_nonop_delay(
	struct se_cmd *cmd)
{
	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
		return 0;
	if (in_interrupt())
		return 0;
	/*
	 * The ALUA Active/NonOptimized access state delay can be disabled
	 * via configfs with a value of zero
	 */
	if (!cmd->alua_nonop_delay)
		return 0;
	/*
	 * struct se_cmd->alua_nonop_delay gets set by a target port group
	 * defined interval in core_alua_state_nonoptimized()
	 */
	msleep_interruptible(cmd->alua_nonop_delay);
	return 0;
}
EXPORT_SYMBOL(core_alua_check_nonop_delay);
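
/*
 * A minimal usage sketch for a hypothetical fabric module (not part of
 * this file): since msleep_interruptible() may sleep, the call must come
 * from process context before the command is dispatched for execution:
 *
 *	core_alua_check_nonop_delay(cmd);
 *	... then hand cmd off to the target core for execution ...
 */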

/*
 * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
 */
static int core_alua_write_tpg_metadata(
	const char *path,
	unsigned char *md_buf,
	u32 md_buf_len)
{
	mm_segment_t old_fs;
	struct file *file;
	struct iovec iov[1];
	int flags = O_RDWR | O_CREAT | O_TRUNC, ret;

	memset(iov, 0, sizeof(struct iovec));

	file = filp_open(path, flags, 0600);
	if (IS_ERR(file) || !file || !file->f_dentry) {
		pr_err("filp_open(%s) for ALUA metadata failed\n",
			path);
		return -ENODEV;
	}

	iov[0].iov_base = &md_buf[0];
	iov[0].iov_len = md_buf_len;

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
	set_fs(old_fs);

	if (ret < 0) {
		pr_err("Error writing ALUA metadata file: %s\n", path);
		filp_close(file, NULL);
		return -EIO;
	}
	filp_close(file, NULL);

	return 0;
}

/*
 * Called with tg_pt_gp->tg_pt_gp_md_mutex held
 */
static int core_alua_update_tpg_primary_metadata(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	int primary_state,
	unsigned char *md_buf)
{
	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
	struct t10_wwn *wwn = &su_dev->t10_wwn;
	char path[ALUA_METADATA_PATH_LEN];
	int len;

	memset(path, 0, ALUA_METADATA_PATH_LEN);

	len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
			"tg_pt_gp_id=%hu\n"
			"alua_access_state=0x%02x\n"
			"alua_access_status=0x%02x\n",
			tg_pt_gp->tg_pt_gp_id, primary_state,
			tg_pt_gp->tg_pt_gp_alua_access_status);

	snprintf(path, ALUA_METADATA_PATH_LEN,
		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));

	return core_alua_write_tpg_metadata(path, md_buf, len);
}
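
/*
 * For illustration, the snprintf() format above produces a plain-text
 * key=value metadata file under /var/target/alua/tpgs_<unit_serial>/<group>;
 * the concrete values shown here are examples only:
 *
 *	tg_pt_gp_id=1
 *	alua_access_state=0x00
 *	alua_access_status=0x01
 */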

static int core_alua_do_transition_tg_pt(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	struct se_port *l_port,
	struct se_node_acl *nacl,
	unsigned char *md_buf,
	int new_state,
	int explict)
{
	struct se_dev_entry *se_deve;
	struct se_lun_acl *lacl;
	struct se_port *port;
	struct t10_alua_tg_pt_gp_member *mem;
	int old_state = 0;
	/*
	 * Save the old primary ALUA access state, and set the current state
	 * to ALUA_ACCESS_STATE_TRANSITION.
	 */
	old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
			ALUA_ACCESS_STATE_TRANSITION);
	tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
				ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
				ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
	/*
	 * Check for the optional ALUA primary state transition delay
	 */
	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
				tg_pt_gp_mem_list) {
		port = mem->tg_pt;
		/*
		 * After an implicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition for the initiator port associated with every I_T
		 * nexus with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED.
		 *
		 * After an explicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED for the initiator port associated with
		 * every I_T nexus other than the I_T nexus on which the SET
		 * TARGET PORT GROUPS command was received.
		 */
		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
		smp_mb__after_atomic_inc();
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

		spin_lock_bh(&port->sep_alua_lock);
		list_for_each_entry(se_deve, &port->sep_alua_list,
					alua_port_list) {
			lacl = se_deve->se_lun_acl;
			/*
			 * se_deve->se_lun_acl pointer may be NULL for an
			 * entry created without explicit Node+MappedLUN ACLs
			 */
			if (!lacl)
				continue;

			if (explict &&
			   (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
			   (l_port != NULL) && (l_port == port))
				continue;

			core_scsi3_ua_allocate(lacl->se_lun_nacl,
				se_deve->mapped_lun, 0x2A,
				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
		}
		spin_unlock_bh(&port->sep_alua_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
	/*
	 * Update the ALUA metadata buf that has been allocated in
	 * core_alua_do_port_transition(), this metadata will be written
	 * to struct file.
	 *
	 * Note that there is the case where we do not want to update the
	 * metadata when the saved metadata is being parsed in userspace
	 * when setting the existing port access state and access status.
	 *
	 * Also note that the failure to write out the ALUA metadata to
	 * struct file does NOT affect the actual ALUA transition.
	 */
	if (tg_pt_gp->tg_pt_gp_write_metadata) {
		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
		core_alua_update_tpg_primary_metadata(tg_pt_gp,
				new_state, md_buf);
		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
	}
	/*
	 * Set the current primary ALUA access state to the requested new state
	 */
	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" from primary access state %s to %s\n", (explict) ? "explicit" :
		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
		core_alua_dump_state(new_state));

	return 0;
}

int core_alua_do_port_transition(
	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
	struct se_device *l_dev,
	struct se_port *l_port,
	struct se_node_acl *l_nacl,
	int new_state,
	int explict)
{
	struct se_device *dev;
	struct se_port *port;
	struct se_subsystem_dev *su_dev;
	struct se_node_acl *nacl;
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	unsigned char *md_buf;
	int primary;

	if (core_alua_check_transition(new_state, &primary) != 0)
		return -EINVAL;

	md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
	if (!md_buf) {
		pr_err("Unable to allocate buf for ALUA metadata\n");
		return -ENOMEM;
	}

	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
	lu_gp = local_lu_gp_mem->lu_gp;
	atomic_inc(&lu_gp->lu_gp_ref_cnt);
	smp_mb__after_atomic_inc();
	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
	/*
	 * For storage objects that are members of the 'default_lu_gp',
	 * we only do transition on the passed *l_tg_pt_gp, and not
	 * on all of the matching target port group IDs in default_lu_gp.
	 */
	if (!lu_gp->lu_gp_id) {
		/*
		 * core_alua_do_transition_tg_pt() will always return
		 * success.
		 */
		core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
					md_buf, new_state, explict);
		atomic_dec(&lu_gp->lu_gp_ref_cnt);
		smp_mb__after_atomic_dec();
		kfree(md_buf);
		return 0;
	}
	/*
	 * For all other LU groups aside from 'default_lu_gp', walk all of
	 * the associated storage objects looking for a matching target port
	 * group ID from the local target port group.
	 */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
				lu_gp_mem_list) {
		dev = lu_gp_mem->lu_gp_mem_dev;
		su_dev = dev->se_sub_dev;
		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
		smp_mb__after_atomic_inc();
		spin_unlock(&lu_gp->lu_gp_lock);

		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
		list_for_each_entry(tg_pt_gp,
				&su_dev->t10_alua.tg_pt_gps_list,
				tg_pt_gp_list) {
			if (!tg_pt_gp->tg_pt_gp_valid_id)
				continue;
			/*
			 * If the target behavior port asymmetric access state
			 * is changed for any target port group accessible via
			 * a logical unit within a LU group, the target port
			 * behavior group asymmetric access states for the same
			 * target port group accessible via other logical units
			 * in that LU group will also change.
			 */
			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
				continue;

			if (l_tg_pt_gp == tg_pt_gp) {
				port = l_port;
				nacl = l_nacl;
			} else {
				port = NULL;
				nacl = NULL;
			}
			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
			smp_mb__after_atomic_inc();
			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
			/*
			 * core_alua_do_transition_tg_pt() will always return
			 * success.
			 */
			core_alua_do_transition_tg_pt(tg_pt_gp, port,
					nacl, md_buf, new_state, explict);

			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
			smp_mb__after_atomic_dec();
		}
		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);

		spin_lock(&lu_gp->lu_gp_lock);
		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
		" Group IDs: %hu %s transition to primary state: %s\n",
		config_item_name(&lu_gp->lu_gp_group.cg_item),
		l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explicit" : "implicit",
		core_alua_dump_state(new_state));

	atomic_dec(&lu_gp->lu_gp_ref_cnt);
	smp_mb__after_atomic_dec();
	kfree(md_buf);
	return 0;
}

/*
 * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
 */
static int core_alua_update_tpg_secondary_metadata(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct se_port *port,
	unsigned char *md_buf,
	u32 md_buf_len)
{
	struct se_portal_group *se_tpg = port->sep_tpg;
	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
	int len;

	memset(path, 0, ALUA_METADATA_PATH_LEN);
	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);

	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
			se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));

	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
			"alua_tg_pt_status=0x%02x\n",
			atomic_read(&port->sep_tg_pt_secondary_offline),
			port->sep_tg_pt_secondary_stat);

	snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
			se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
			port->sep_lun->unpacked_lun);

	return core_alua_write_tpg_metadata(path, md_buf, len);
}
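
/*
 * For illustration, the secondary metadata lands in a per-port file under
 * /var/target/alua/<fabric>/<wwn>[+<tpgt>]/lun_<n>; the path components and
 * values shown here are examples only:
 *
 *	alua_tg_pt_offline=1
 *	alua_tg_pt_status=0x02
 */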

static int core_alua_set_tg_pt_secondary_state(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct se_port *port,
	int explict,
	int offline)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	unsigned char *md_buf;
	u32 md_buf_len;
	int trans_delay_msecs;

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (!tg_pt_gp) {
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_err("Unable to complete secondary state"
				" transition\n");
		return -EINVAL;
	}
	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
	/*
	 * Set the secondary ALUA target port access state to OFFLINE
	 * or release the previously set secondary state for struct se_port
	 */
	if (offline)
		atomic_set(&port->sep_tg_pt_secondary_offline, 1);
	else
		atomic_set(&port->sep_tg_pt_secondary_offline, 0);

	md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
	port->sep_tg_pt_secondary_stat = (explict) ?
			ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
			ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" to secondary access state: %s\n", (explict) ? "explicit" :
		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");

	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	/*
	 * Do the optional transition delay after we set the secondary
	 * ALUA access state.
	 */
	if (trans_delay_msecs != 0)
		msleep_interruptible(trans_delay_msecs);
	/*
	 * See if we need to update the ALUA fabric port metadata for
	 * secondary state and status
	 */
	if (port->sep_tg_pt_secondary_write_md) {
		md_buf = kzalloc(md_buf_len, GFP_KERNEL);
		if (!md_buf) {
			pr_err("Unable to allocate md_buf for"
				" secondary ALUA access metadata\n");
			return -ENOMEM;
		}
		mutex_lock(&port->sep_tg_pt_md_mutex);
		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
				md_buf, md_buf_len);
		mutex_unlock(&port->sep_tg_pt_md_mutex);

		kfree(md_buf);
	}

	return 0;
}

struct t10_alua_lu_gp *
core_alua_allocate_lu_gp(const char *name, int def_group)
{
	struct t10_alua_lu_gp *lu_gp;

	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
	if (!lu_gp) {
		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
	spin_lock_init(&lu_gp->lu_gp_lock);
	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);

	if (def_group) {
		lu_gp->lu_gp_id = alua_lu_gps_counter++;
		lu_gp->lu_gp_valid_id = 1;
		alua_lu_gps_count++;
	}

	return lu_gp;
}

int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
{
	struct t10_alua_lu_gp *lu_gp_tmp;
	u16 lu_gp_id_tmp;
	/*
	 * The lu_gp->lu_gp_id may only be set once..
	 */
	if (lu_gp->lu_gp_valid_id) {
		pr_warn("ALUA LU Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&lu_gps_lock);
	if (alua_lu_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_lu_gps_count:"
				" 0x0000ffff reached\n");
		spin_unlock(&lu_gps_lock);
		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
		return -ENOSPC;
	}
again:
	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
				alua_lu_gps_counter++;

	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
			if (!lu_gp_id)
				goto again;

			pr_warn("ALUA Logical Unit Group ID: %hu"
				" already exists, ignoring request\n",
				lu_gp_id);
			spin_unlock(&lu_gps_lock);
			return -EINVAL;
		}
	}

	lu_gp->lu_gp_id = lu_gp_id_tmp;
	lu_gp->lu_gp_valid_id = 1;
	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
	alua_lu_gps_count++;
	spin_unlock(&lu_gps_lock);

	return 0;
}

static struct t10_alua_lu_gp_member *
core_alua_allocate_lu_gp_mem(struct se_device *dev)
{
	struct t10_alua_lu_gp_member *lu_gp_mem;

	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
	if (!lu_gp_mem) {
		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);

	lu_gp_mem->lu_gp_mem_dev = dev;
	dev->dev_alua_lu_gp_mem = lu_gp_mem;

	return lu_gp_mem;
}

void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
{
	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
	/*
	 * Once we have reached this point, config_item_put() has
	 * already been called from target_core_alua_drop_lu_gp().
	 *
	 * Here, we remove the *lu_gp from the global list so that
	 * no associations can be made while we are releasing
	 * struct t10_alua_lu_gp.
	 */
	spin_lock(&lu_gps_lock);
	list_del(&lu_gp->lu_gp_node);
	alua_lu_gps_count--;
	spin_unlock(&lu_gps_lock);
	/*
	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
	 * released with core_alua_put_lu_gp_from_name()
	 */
	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
		cpu_relax();
	/*
	 * Release reference to struct t10_alua_lu_gp * from all associated
	 * struct se_device.
	 */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		/*
		 * lu_gp_mem is associated with a single
		 * struct se_device->dev_alua_lu_gp_mem, and is released when
		 * struct se_device is released via core_alua_free_lu_gp_mem().
		 *
		 * If the passed lu_gp does NOT match the default_lu_gp, assume
		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
		 */
		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		if (lu_gp != default_lu_gp)
			__core_alua_attach_lu_gp_mem(lu_gp_mem,
					default_lu_gp);
		else
			lu_gp_mem->lu_gp = NULL;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		spin_lock(&lu_gp->lu_gp_lock);
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
}

void core_alua_free_lu_gp_mem(struct se_device *dev)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua *alua = &su_dev->t10_alua;
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem;

	if (alua->alua_type != SPC3_ALUA_EMULATED)
		return;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return;

	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
		cpu_relax();

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		spin_lock(&lu_gp->lu_gp_lock);
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		lu_gp_mem->lu_gp = NULL;
	}
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
}

struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
{
	struct t10_alua_lu_gp *lu_gp;
	struct config_item *ci;

	spin_lock(&lu_gps_lock);
	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
		if (!lu_gp->lu_gp_valid_id)
			continue;
		ci = &lu_gp->lu_gp_group.cg_item;
		if (!strcmp(config_item_name(ci), name)) {
			atomic_inc(&lu_gp->lu_gp_ref_cnt);
			spin_unlock(&lu_gps_lock);
			return lu_gp;
		}
	}
	spin_unlock(&lu_gps_lock);

	return NULL;
}

void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gps_lock);
	atomic_dec(&lu_gp->lu_gp_ref_cnt);
	spin_unlock(&lu_gps_lock);
}

/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */
void __core_alua_attach_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	lu_gp_mem->lu_gp = lu_gp;
	lu_gp_mem->lu_gp_assoc = 1;
	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
	lu_gp->lu_gp_members++;
	spin_unlock(&lu_gp->lu_gp_lock);
}

/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */
void __core_alua_drop_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	list_del(&lu_gp_mem->lu_gp_mem_list);
	lu_gp_mem->lu_gp = NULL;
	lu_gp_mem->lu_gp_assoc = 0;
	lu_gp->lu_gp_members--;
	spin_unlock(&lu_gp->lu_gp_lock);
}

struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
	struct se_subsystem_dev *su_dev,
	const char *name,
	int def_group)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
	if (!tg_pt_gp) {
		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
		return NULL;
	}
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
	tg_pt_gp->tg_pt_gp_su_dev = su_dev;
	tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
		ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
	/*
	 * Enable both explicit and implicit ALUA support by default
	 */
	tg_pt_gp->tg_pt_gp_alua_access_type =
			TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
	/*
	 * Set the default Active/NonOptimized Delay in milliseconds
	 */
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS;

	if (def_group) {
		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
		tg_pt_gp->tg_pt_gp_id =
				su_dev->t10_alua.alua_tg_pt_gps_counter++;
		tg_pt_gp->tg_pt_gp_valid_id = 1;
		su_dev->t10_alua.alua_tg_pt_gps_count++;
		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
				&su_dev->t10_alua.tg_pt_gps_list);
		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
	}

	return tg_pt_gp;
}

int core_alua_set_tg_pt_gp_id(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	u16 tg_pt_gp_id)
{
	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
	u16 tg_pt_gp_id_tmp;
	/*
	 * The tg_pt_gp->tg_pt_gp_id may only be set once..
	 */
	if (tg_pt_gp->tg_pt_gp_valid_id) {
		pr_warn("ALUA TG PT Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
	if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
			" 0x0000ffff reached\n");
		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
		return -ENOSPC;
	}
again:
	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
			su_dev->t10_alua.alua_tg_pt_gps_counter++;

	list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
			if (!tg_pt_gp_id)
				goto again;

			pr_err("ALUA Target Port Group ID: %hu already"
				" exists, ignoring request\n", tg_pt_gp_id);
			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
			return -EINVAL;
		}
	}

	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
	tg_pt_gp->tg_pt_gp_valid_id = 1;
	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
			&su_dev->t10_alua.tg_pt_gps_list);
	su_dev->t10_alua.alua_tg_pt_gps_count++;
	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);

	return 0;
}

struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
	struct se_port *port)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;

	tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
				GFP_KERNEL);
	if (!tg_pt_gp_mem) {
		pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
	spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);

	tg_pt_gp_mem->tg_pt = port;
	port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;

	return tg_pt_gp_mem;
}
void core_alua_free_tg_pt_gp(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
	/*
	 * Once we have reached this point, config_item_put() has already
	 * been called from target_core_alua_drop_tg_pt_gp().
	 *
	 * Here we remove *tg_pt_gp from the global list so that
	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
	 */
	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
	list_del(&tg_pt_gp->tg_pt_gp_list);
	su_dev->t10_alua.alua_tg_pt_gps_count--;
	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
	/*
	 * Allow a struct t10_alua_tg_pt_gp reference taken by
	 * core_alua_get_tg_pt_gp_by_name() in
	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
	 * to be released with core_alua_put_tg_pt_gp_from_name().
	 */
	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
		cpu_relax();
	/*
	 * Release reference to struct t10_alua_tg_pt_gp from all associated
	 * struct se_port.
	 */
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
			&tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
			tg_pt_gp->tg_pt_gp_members--;
			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
		}
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
		/*
		 * tg_pt_gp_mem is associated with a single
		 * se_port->sep_alua_tg_pt_gp_mem, and is released via
		 * core_alua_free_tg_pt_gp_mem().
		 *
		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
		 * assume we want to re-associate a given tg_pt_gp_mem with
		 * default_tg_pt_gp.
		 */
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
					su_dev->t10_alua.default_tg_pt_gp);
		} else
			tg_pt_gp_mem->tg_pt_gp = NULL;
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
}
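
/*
 * Release a port's ALUA group member: spin until any in-flight I/O
 * references drop, detach the member from its current tg_pt_gp (if
 * any), and free it back to the kmem_cache.  A no-op unless the device
 * runs in SPC3_ALUA_EMULATED mode with an allocated member.
 */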
void core_alua_free_tg_pt_gp_mem(struct se_port *port)
{
	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
	struct t10_alua *alua = &su_dev->t10_alua;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;

	if (alua->alua_type != SPC3_ALUA_EMULATED)
		return;

	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem)
		return;

	while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
		cpu_relax();

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp) {
		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
			tg_pt_gp->tg_pt_gp_members--;
			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
		}
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
		tg_pt_gp_mem->tg_pt_gp = NULL;
	}
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

	kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
}
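
/*
 * Look up a target port group by its configfs item name.  Groups that
 * never received a valid ID are skipped.  On success the group's
 * tg_pt_gp_ref_cnt is bumped under tg_pt_gps_lock; the matching
 * core_alua_put_tg_pt_gp_from_name() below drops it again.
 */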
static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
	struct se_subsystem_dev *su_dev,
	const char *name)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_item *ci;

	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		if (!tg_pt_gp->tg_pt_gp_valid_id)
			continue;
		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
		if (!strcmp(config_item_name(ci), name)) {
			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
			return tg_pt_gp;
		}
	}
	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);

	return NULL;
}
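
/*
 * Drop the name-lookup reference taken by
 * core_alua_get_tg_pt_gp_by_name() above.
 */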
static void core_alua_put_tg_pt_gp_from_name(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;

	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
}

/*
 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
 */
void __core_alua_attach_tg_pt_gp_mem(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
	tg_pt_gp_mem->tg_pt_gp_assoc = 1;
	list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
			&tg_pt_gp->tg_pt_gp_mem_list);
	tg_pt_gp->tg_pt_gp_members++;
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}

/*
 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
 */
static void __core_alua_drop_tg_pt_gp_mem(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
	tg_pt_gp_mem->tg_pt_gp = NULL;
	tg_pt_gp_mem->tg_pt_gp_assoc = 0;
	tg_pt_gp->tg_pt_gp_members--;
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
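
/*
 * configfs "show" handler for a port's ALUA group info: formats the
 * group alias, ID, and primary/secondary access state and status into
 * a single page.  Backs the per-LUN alua_tg_pt_gp attribute wired up
 * in target_core_configfs.c (see the store-side comment in
 * core_alua_free_tg_pt_gp() above).
 */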
ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
{
	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
	struct config_item *tg_pt_ci;
	struct t10_alua *alua = &su_dev->t10_alua;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	ssize_t len = 0;

	if (alua->alua_type != SPC3_ALUA_EMULATED)
		return len;

	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem)
		return len;

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp) {
		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
			" %hu\nTG Port Primary Access State: %s\nTG Port "
			"Primary Access Status: %s\nTG Port Secondary Access"
			" State: %s\nTG Port Secondary Access Status: %s\n",
			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
			core_alua_dump_state(atomic_read(
					&tg_pt_gp->tg_pt_gp_alua_access_state)),
			core_alua_dump_status(
				tg_pt_gp->tg_pt_gp_alua_access_status),
			(atomic_read(&port->sep_tg_pt_secondary_offline)) ?
			"Offline" : "None",
			core_alua_dump_status(port->sep_tg_pt_secondary_stat));
	}
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

	return len;
}
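
/*
 * configfs "store" handler for the same attribute.  Writing a group
 * alias moves the port into that group; writing the literal string
 * "NULL" moves it back to default_tg_pt_gp.  A sketch of expected
 * usage from userspace (fabric, WWN, and group names are illustrative):
 *
 *   echo some_group > \
 *     /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/lun/lun_0/alua_tg_pt_gp
 *   echo NULL > \
 *     /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/lun/lun_0/alua_tg_pt_gp
 */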
ssize_t core_alua_store_tg_pt_gp_info(
	struct se_port *port,
	const char *page,
	size_t count)
{
	struct se_portal_group *tpg;
	struct se_lun *lun;
	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned char buf[TG_PT_GROUP_NAME_BUF];
	int move = 0;

	tpg = port->sep_tpg;
	lun = port->sep_lun;

	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
		pr_warn("SPC3_ALUA_EMULATED not enabled for"
			" %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
			tpg->se_tpg_tfo->tpg_get_tag(tpg),
			config_item_name(&lun->lun_group.cg_item));
		return -EINVAL;
	}
	if (count >= TG_PT_GROUP_NAME_BUF) {
		pr_err("ALUA Target Port Group alias too large!\n");
		return -EINVAL;
	}
	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
	memcpy(buf, page, count);
	/*
	 * Any ALUA target port group alias besides "NULL" means we will be
	 * making a new group association.
	 */
	if (strcmp(strstrip(buf), "NULL")) {
		/*
		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
		 * struct t10_alua_tg_pt_gp.  This reference is released with
		 * core_alua_put_tg_pt_gp_from_name() below.
		 */
		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
					strstrip(buf));
		if (!tg_pt_gp_new)
			return -ENODEV;
	}
	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem) {
		if (tg_pt_gp_new)
			core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
		pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
		return -EINVAL;
	}

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp) {
		/*
		 * Clearing an existing tg_pt_gp association, and replacing
		 * with the default_tg_pt_gp.
		 */
		if (!tg_pt_gp_new) {
			pr_debug("Target_Core_ConfigFS: Moving"
				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
				" alua/%s, ID: %hu back to"
				" default_tg_pt_gp\n",
				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
				tpg->se_tpg_tfo->tpg_get_tag(tpg),
				config_item_name(&lun->lun_group.cg_item),
				config_item_name(
					&tg_pt_gp->tg_pt_gp_group.cg_item),
				tg_pt_gp->tg_pt_gp_id);

			__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
					su_dev->t10_alua.default_tg_pt_gp);
			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

			return count;
		}
		/*
		 * Removing existing association of tg_pt_gp_mem with tg_pt_gp
		 */
		__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
		move = 1;
	}
	/*
	 * Associate tg_pt_gp_mem with tg_pt_gp_new.
	 */
	__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		config_item_name(&lun->lun_group.cg_item),
		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
		tg_pt_gp_new->tg_pt_gp_id);

	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
	return count;
}
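
/*
 * "alua_access_type" attribute: reports and sets the TPGS support bits
 * advertised by this target port group.  The stored value maps as:
 *
 *   0 = None
 *   1 = TPGS_IMPLICT_ALUA (implicit only)
 *   2 = TPGS_EXPLICT_ALUA (explicit only)
 *   3 = TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA (both)
 *
 * Illustrative usage (the group's configfs path is an assumption):
 *
 *   echo 3 > /sys/kernel/config/target/core/$HBA/$DEV/alua/some_group/alua_access_type
 */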
ssize_t core_alua_show_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
		return sprintf(page, "Implict and Explict\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
		return sprintf(page, "Implict\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
		return sprintf(page, "Explict\n");
	else
		return sprintf(page, "None\n");
}

ssize_t core_alua_store_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_access_type\n");
		return -EINVAL;
	}
	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
		pr_err("Illegal value for alua_access_type:"
			" %lu\n", tmp);
		return -EINVAL;
	}
	if (tmp == 3)
		tg_pt_gp->tg_pt_gp_alua_access_type =
			TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
	else if (tmp == 2)
		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
	else if (tmp == 1)
		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
	else
		tg_pt_gp->tg_pt_gp_alua_access_type = 0;

	return count;
}
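
/*
 * "nonop_delay_msecs" attribute: extra delay, in milliseconds, applied
 * to I/O dispatched through a port whose group is in the
 * ACTIVE/NON-OPTIMIZED state (picked up by core_alua_state_nonoptimized()
 * earlier in this file), capped at ALUA_MAX_NONOP_DELAY_MSECS.
 */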
ssize_t core_alua_show_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
}

ssize_t core_alua_store_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract nonop_delay_msecs\n");
		return -EINVAL;
	}
	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_NONOP_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;

	return count;
}
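
/*
 * "trans_delay_msecs" attribute: artificial delay, in milliseconds,
 * applied during primary access state transitions for this group,
 * bounded by ALUA_MAX_TRANS_DELAY_MSECS.
 */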
ssize_t core_alua_show_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
}

ssize_t core_alua_store_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract trans_delay_msecs\n");
		return -EINVAL;
	}
	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
		pr_err("Passed trans_delay_msecs: %lu, exceeds"
			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_TRANS_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;

	return count;
}
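
/*
 * "implict_trans_secs" attribute: the implicit transition time, in
 * seconds, advertised to initiators (presumably via the REPORT TARGET
 * PORT GROUPS extended header) so they can size their transition
 * timeouts.  Bounded by ALUA_MAX_IMPLICT_TRANS_SECS.
 */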
ssize_t core_alua_show_implict_trans_secs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs);
}

ssize_t core_alua_store_implict_trans_secs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract implict_trans_secs\n");
		return -EINVAL;
	}
	if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) {
		pr_err("Passed implict_trans_secs: %lu, exceeds"
			" ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", tmp,
			ALUA_MAX_IMPLICT_TRANS_SECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp;

	return count;
}
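
/*
 * "preferred" attribute: controls the PREF bit reported for this group
 * in REPORT TARGET PORT GROUPS descriptors; only 0 and 1 are accepted.
 */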
ssize_t core_alua_show_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
}

ssize_t core_alua_store_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract preferred ALUA value\n");
		return -EINVAL;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_pref = (int)tmp;

	return count;
}
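
/*
 * Per-LUN secondary ALUA state.  Writing 1 to the "alua_tg_pt_offline"
 * attribute transitions the port to the OFFLINE secondary state via
 * core_alua_set_tg_pt_secondary_state(); writing 0 brings it back.
 */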
ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
	if (!lun->lun_sep)
		return -ENODEV;

	return sprintf(page, "%d\n",
		atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
}

ssize_t core_alua_store_offline_bit(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned long tmp;
	int ret;

	if (!lun->lun_sep)
		return -ENODEV;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_offline value\n");
		return -EINVAL;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
			tmp);
		return -EINVAL;
	}
	tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem) {
		pr_err("Unable to locate *tg_pt_gp_mem\n");
		return -EINVAL;
	}

	ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
			lun->lun_sep, 0, (int)tmp);
	if (ret < 0)
		return -EINVAL;

	return count;
}
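
/*
 * "alua_tg_pt_status" attribute: secondary access status for the port.
 * Only the ALUA_STATUS_* values defined in target_core_alua.h (none,
 * altered by explicit STPG, altered by implicit ALUA) are accepted.
 */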
ssize_t core_alua_show_secondary_status(
	struct se_lun *lun,
	char *page)
{
	return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
}

ssize_t core_alua_store_secondary_status(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_status\n");
		return -EINVAL;
	}
	if ((tmp != ALUA_STATUS_NONE) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
			tmp);
		return -EINVAL;
	}
	lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;

	return count;
}
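
/*
 * "alua_tg_pt_write_md" attribute: when set to 1, secondary state
 * changes for this port are also persisted to the ALUA metadata file
 * (see core_alua_update_tpg_secondary_metadata() earlier in this file)
 * so the state survives a restart.
 */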
ssize_t core_alua_show_secondary_write_metadata(
	struct se_lun *lun,
	char *page)
{
	return sprintf(page, "%d\n",
			lun->lun_sep->sep_tg_pt_secondary_write_md);
}

ssize_t core_alua_store_secondary_write_metadata(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_write_md\n");
		return -EINVAL;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_write_md:"
			" %lu\n", tmp);
		return -EINVAL;
	}
	lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;

	return count;
}
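
/*
 * Device-bringup hook that decides which ALUA personality a new
 * struct se_device gets: passthrough for real SPC hardware behind
 * pSCSI, full SPC-3 emulation (with membership in default_lu_gp) for
 * SPC-3 capable or emulated devices, and disabled for SPC-2 devices.
 */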
int core_setup_alua(struct se_device *dev, int force_pt)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua *alua = &su_dev->t10_alua;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	/*
	 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
	 * of the Underlying SCSI hardware.  In Linux/SCSI terms, this can
	 * cause a problem because libata and some SATA RAID HBAs appear
	 * under Linux/SCSI, but emulate SCSI logic themselves.
	 */
	if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
	    !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
		alua->alua_type = SPC_ALUA_PASSTHROUGH;
		alua->alua_state_check = &core_alua_state_check_nop;
		pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
			" emulation\n", dev->transport->name);
		return 0;
	}
	/*
	 * If SPC-3 or above is reported by real or emulated struct se_device,
	 * use emulated ALUA.
	 */
	if (dev->transport->get_device_rev(dev) >= SCSI_3) {
		pr_debug("%s: Enabling ALUA Emulation for SPC-3"
			" device\n", dev->transport->name);
		/*
		 * Associate this struct se_device with the default ALUA
		 * LUN Group.
		 */
		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
		if (IS_ERR(lu_gp_mem))
			return PTR_ERR(lu_gp_mem);

		alua->alua_type = SPC3_ALUA_EMULATED;
		alua->alua_state_check = &core_alua_state_check;
		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		__core_alua_attach_lu_gp_mem(lu_gp_mem,
				default_lu_gp);
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		pr_debug("%s: Adding to default ALUA LU Group:"
			" core/alua/lu_gps/default_lu_gp\n",
			dev->transport->name);
	} else {
		alua->alua_type = SPC2_ALUA_DISABLED;
		alua->alua_state_check = &core_alua_state_check_nop;
		pr_debug("%s: Disabling ALUA Emulation for SPC-2"
			" device\n", dev->transport->name);
	}

	return 0;
}