/*
 * SBP2 target driver (SCSI over IEEE1394 in target mode)
 *
 * Copyright (C) 2011 Chris Boot <bootc@bootc.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define KMSG_COMPONENT "sbp_target"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <asm/unaligned.h>

#include "sbp_target.h"

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *sbp_fabric_configfs;

/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
	.start = CSR_REGISTER_BASE + 0x10000,
	.end = 0x1000000000000ULL,
};
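/*
 * Unit directory entries advertised in the config ROM.  Each quadlet is an
 * IEEE 1212 directory entry: the top byte is the key and the low 24 bits are
 * the immediate value, as noted alongside each entry below.
 */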
static const u32 sbp_unit_directory_template[] = {
	0x1200609e, /* unit_specifier_id: NCITS/T10 */
	0x13010483, /* unit_sw_version: 1155D Rev 4 */
	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
	0x390104d8, /* command_set: SPC-2 */
	0x3b000000, /* command_set_revision: 0 */
	0x3c000001, /* firmware_revision: 1 */
};
#define SESSION_MAINTENANCE_INTERVAL HZ

static atomic_t login_id = ATOMIC_INIT(0);

static void session_maintenance_work(struct work_struct *);
static int sbp_run_transaction(struct fw_card *, int, int, int, int,
		unsigned long long, void *, size_t);
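/*
 * Read the peer's EUI-64 (GUID) out of its configuration ROM: quadlets 3 and
 * 4 of the bus information block hold the high and low halves respectively.
 */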
static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
{
	int ret;
	__be32 high, low;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
			&high, sizeof(high));
	if (ret != RCODE_COMPLETE)
		return ret;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
			&low, sizeof(low));
	if (ret != RCODE_COMPLETE)
		return ret;

	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);

	return RCODE_COMPLETE;
}
static struct sbp_session *sbp_session_find_by_guid(
		struct sbp_tpg *tpg, u64 guid)
{
	struct se_session *se_sess;
	struct sbp_session *sess, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;
		if (sess->guid == guid)
			found = sess;
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static struct sbp_login_descriptor *sbp_login_find_by_lun(
		struct sbp_session *session, struct se_lun *lun)
{
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&session->lock);
	list_for_each_entry(login, &session->login_list, link) {
		if (login->lun == lun)
			found = login;
	}
	spin_unlock_bh(&session->lock);

	return found;
}

static int sbp_login_count_all_by_lun(
		struct sbp_tpg *tpg,
		struct se_lun *lun,
		int exclusive)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	int count = 0;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->lun != lun)
				continue;

			if (!exclusive || login->exclusive)
				count++;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return count;
}
static struct sbp_login_descriptor *sbp_login_find_by_id(
		struct sbp_tpg *tpg, int login_id)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_id == login_id)
				found = login;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static struct se_lun *sbp_get_lun_from_tpg(struct sbp_tpg *tpg, int lun)
{
	struct se_portal_group *se_tpg = &tpg->se_tpg;
	struct se_lun *se_lun;

	if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return ERR_PTR(-EINVAL);

	spin_lock(&se_tpg->tpg_lun_lock);
	se_lun = se_tpg->tpg_lun_list[lun];

	if (se_lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
		se_lun = ERR_PTR(-ENODEV);

	spin_unlock(&se_tpg->tpg_lun_lock);

	return se_lun;
}
static struct sbp_session *sbp_session_create(
		struct sbp_tpg *tpg,
		u64 guid)
{
	struct sbp_session *sess;
	int ret;
	char guid_str[17];
	struct se_node_acl *se_nacl;

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		pr_err("failed to allocate session descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	sess->se_sess = transport_init_session();
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");

		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
	if (!se_nacl) {
		pr_warn("Node ACL not found for %s\n", guid_str);

		transport_free_session(sess->se_sess);
		kfree(sess);

		return ERR_PTR(-EPERM);
	}
	sess->se_sess->se_node_acl = se_nacl;

	spin_lock_init(&sess->lock);
	INIT_LIST_HEAD(&sess->login_list);
	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);

	sess->guid = guid;

	transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);

	return sess;
}

static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
{
	spin_lock_bh(&sess->lock);
	if (!list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (cancel_work)
		cancel_delayed_work_sync(&sess->maint_work);

	transport_deregister_session_configfs(sess->se_sess);
	transport_deregister_session(sess->se_sess);

	if (sess->card)
		fw_card_put(sess->card);

	kfree(sess);
}

static void sbp_target_agent_unregister(struct sbp_target_agent *);

static void sbp_login_release(struct sbp_login_descriptor *login,
	bool cancel_work)
{
	struct sbp_session *sess = login->sess;

	/* FIXME: abort/wait on tasks */

	sbp_target_agent_unregister(login->tgt_agt);

	if (sess) {
		spin_lock_bh(&sess->lock);
		list_del(&login->link);
		spin_unlock_bh(&sess->lock);

		sbp_session_release(sess, cancel_work);
	}

	kfree(login);
}
static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *);

static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct se_lun *se_lun;
	int ret;
	u64 guid;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	int login_response_len;

	se_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
	if (IS_ERR(se_lun)) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		se_lun->unpacked_lun, guid);

	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, se_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators. Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target. When
			 * the OS takes control of the session it issues its own
			 * LOGIN rather than a RECONNECT. To avoid the machine
			 * waiting until the reconnect_hold expires, we can skip
			 * the ACCESS_DENIED errors to speed things up.
			 */

			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
	    sbp_login_count_all_by_lun(tpg, se_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, se_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, se_lun, 0) >=
			tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

	/* only take the latest reconnect_hold into account */
	sess->reconnect_hold = min(
		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
		tport->max_reconnect_timeout) - 1;

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");

		sbp_session_release(sess, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login->sess = sess;
	login->lun = se_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);

		sbp_session_release(sess, true);
		kfree(login);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");

		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login_response_len = clamp_val(
			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
			12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
		sess->node_id, sess->generation, sess->speed,
		sbp2_pointer_to_addr(&req->orb.ptr2), response,
		login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);

		kfree(response);
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
static void sbp_management_request_query_logins(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	pr_notice("QUERY LOGINS not implemented\n");
	/* FIXME: implement */

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}

static void sbp_management_request_reconnect(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int ret;
	u64 guid;
	struct sbp_login_descriptor *login;

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);

	login = sbp_login_find_by_id(tpg,
		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));

	if (!login) {
		pr_err("mgt_agent RECONNECT unknown login ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	if (login->sess->guid != guid) {
		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	spin_lock_bh(&login->sess->lock);
	if (login->sess->card)
		fw_card_put(login->sess->card);

	/* update the node details */
	login->sess->generation = req->generation;
	login->sess->node_id = req->node_addr;
	login->sess->card = fw_card_get(req->card);
	login->sess->speed = req->speed;
	spin_unlock_bh(&login->sess->lock);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_logout(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int id;
	struct sbp_login_descriptor *login;

	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));

	login = sbp_login_find_by_id(tpg, id);
	if (!login) {
		pr_warn("cannot find login: %d\n", id);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
		return;
	}

	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
		login->lun->unpacked_lun, login->login_id);

	if (req->node_addr != login->sess->node_id) {
		pr_warn("logout from different node ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	sbp_login_release(login, true);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
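/*
 * Session maintenance: once a second (SESSION_MAINTENANCE_INTERVAL) the
 * delayed work below checks whether a bus reset has invalidated the session's
 * node ID.  If it has, the session waits up to reconnect_hold + 1 seconds for
 * a RECONNECT from the initiator before its logins are torn down.
 */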
static void session_check_for_reset(struct sbp_session *sess)
{
	bool card_valid = false;

	spin_lock_bh(&sess->lock);

	if (sess->card) {
		spin_lock_irq(&sess->card->lock);
		card_valid = (sess->card->local_node != NULL);
		spin_unlock_irq(&sess->card->lock);

		if (!card_valid) {
			fw_card_put(sess->card);
			sess->card = NULL;
		}
	}

	if (!card_valid || (sess->generation != sess->card->generation)) {
		pr_info("Waiting for reconnect from node: %016llx\n",
				sess->guid);

		sess->node_id = -1;
		sess->reconnect_expires = get_jiffies_64() +
			((sess->reconnect_hold + 1) * HZ);
	}

	spin_unlock_bh(&sess->lock);
}

static void session_reconnect_expired(struct sbp_session *sess)
{
	struct sbp_login_descriptor *login, *temp;
	LIST_HEAD(login_list);

	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);

	spin_lock_bh(&sess->lock);
	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
		login->sess = NULL;
		list_move_tail(&login->link, &login_list);
	}
	spin_unlock_bh(&sess->lock);

	list_for_each_entry_safe(login, temp, &login_list, link) {
		list_del(&login->link);
		sbp_login_release(login, false);
	}

	sbp_session_release(sess, false);
}

static void session_maintenance_work(struct work_struct *work)
{
	struct sbp_session *sess = container_of(work, struct sbp_session,
			maint_work.work);

	/* could be called while tearing down the session */
	spin_lock_bh(&sess->lock);
	if (list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (sess->node_id != -1) {
		/* check for bus reset and make node_id invalid */
		session_check_for_reset(sess);

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
		/* still waiting for reconnect */
		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else {
		/* reconnect timeout has expired */
		session_reconnect_expired(sess);
	}
}
static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	int state;

	switch (tcode) {
	case TCODE_READ_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_STATE READ\n");

		spin_lock_bh(&agent->lock);
		state = agent->state;
		spin_unlock_bh(&agent->lock);

		*(__be32 *)data = cpu_to_be32(state);

		return RCODE_COMPLETE;

	case TCODE_WRITE_QUADLET_REQUEST:
		/* ignored */
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_RESET\n");
		spin_lock_bh(&agent->lock);
		agent->state = AGENT_STATE_RESET;
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	struct sbp2_pointer *ptr = data;

	switch (tcode) {
	case TCODE_WRITE_BLOCK_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED &&
				agent->state != AGENT_STATE_RESET) {
			spin_unlock_bh(&agent->lock);
			pr_notice("Ignoring ORB_POINTER write while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
		agent->doorbell = false;

		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
				agent->orb_pointer);

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_BLOCK_REQUEST:
		pr_debug("tgt_agent ORB_POINTER READ\n");
		spin_lock_bh(&agent->lock);
		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED) {
			spin_unlock_bh(&agent->lock);
			pr_debug("Ignoring DOORBELL while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->doorbell = true;

		pr_debug("tgt_agent DOORBELL\n");

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
		int tcode, void *data, struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
		/* ignored as we don't send unsolicited status */
		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
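/*
 * The command block agent exposes a 0x20-byte register block; tgt_agent_rw()
 * below dispatches accesses by offset: 0x00 AGENT_STATE, 0x04 AGENT_RESET,
 * 0x08 ORB_POINTER (8 bytes), 0x10 DOORBELL, 0x14 UNSOLICITED_STATUS_ENABLE.
 */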
static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *data, size_t length,
		void *callback_data)
{
	struct sbp_target_agent *agent = callback_data;
	struct sbp_session *sess = agent->login->sess;
	int sess_gen, sess_node, rcode;

	spin_lock_bh(&sess->lock);
	sess_gen = sess->generation;
	sess_node = sess->node_id;
	spin_unlock_bh(&sess->lock);

	if (generation != sess_gen) {
		pr_notice("ignoring request with wrong generation\n");
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	if (source != sess_node) {
		pr_notice("ignoring request from foreign node (%x != %x)\n",
				source, sess_node);
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	/* turn offset into the offset from the start of the block */
	offset -= agent->handler.offset;

	if (offset == 0x00 && length == 4) {
		/* AGENT_STATE */
		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
	} else if (offset == 0x04 && length == 4) {
		/* AGENT_RESET */
		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
	} else if (offset == 0x08 && length == 8) {
		/* ORB_POINTER */
		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
	} else if (offset == 0x10 && length == 4) {
		/* DOORBELL */
		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
	} else if (offset == 0x14 && length == 4) {
		/* UNSOLICITED_STATUS_ENABLE */
		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
				data, agent);
	} else {
		rcode = RCODE_ADDRESS_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static void sbp_handle_command(struct sbp_target_request *);
static int sbp_send_status(struct sbp_target_request *);
static void sbp_free_request(struct sbp_target_request *);

static void tgt_agent_process_work(struct work_struct *work)
{
	struct sbp_target_request *req =
		container_of(work, struct sbp_target_request, work);

	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
			req->orb_pointer,
			sbp2_pointer_to_addr(&req->orb.next_orb),
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			be32_to_cpu(req->orb.misc));

	if (req->orb_pointer >> 32)
		pr_debug("ORB with high bits set\n");

	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
	case 0: /* Format specified by this standard */
		sbp_handle_command(req);
		return;
	case 1: /* Reserved for future standardization */
	case 2: /* Vendor-dependent */
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_REQ_TYPE_NOTSUPP));
		sbp_send_status(req);
		sbp_free_request(req);
		return;
	case 3: /* Dummy ORB */
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_DUMMY_ORB_COMPLETE));
		sbp_send_status(req);
		sbp_free_request(req);
		return;
	default:
		BUG();
	}
}

/* used to double-check we haven't been issued an AGENT_RESET */
static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
{
	bool active;

	spin_lock_bh(&agent->lock);
	active = (agent->state == AGENT_STATE_ACTIVE);
	spin_unlock_bh(&agent->lock);

	return active;
}
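/*
 * Walk the initiator's linked list of ORBs: fetch each ORB, queue it for
 * processing, and follow next_ORB until the list terminates (high bit set in
 * next_ORB) or an AGENT_RESET takes the agent out of the ACTIVE state.  After
 * a DOORBELL the first ORB has already been processed, so it is only re-read
 * to pick up its next_ORB field.
 */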
static void tgt_agent_fetch_work(struct work_struct *work)
{
	struct sbp_target_agent *agent =
		container_of(work, struct sbp_target_agent, work);
	struct sbp_session *sess = agent->login->sess;
	struct sbp_target_request *req;
	int ret;
	bool doorbell = agent->doorbell;
	u64 next_orb = agent->orb_pointer;

	while (next_orb && tgt_agent_check_active(agent)) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);
			return;
		}

		req->login = agent->login;
		req->orb_pointer = next_orb;

		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
					req->orb_pointer >> 32));
		req->status.orb_low = cpu_to_be32(
				req->orb_pointer & 0xfffffffc);

		/* read in the ORB */
		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
				sess->node_id, sess->generation, sess->speed,
				req->orb_pointer, &req->orb, sizeof(req->orb));
		if (ret != RCODE_COMPLETE) {
			pr_debug("tgt_orb fetch failed: %x\n", ret);

			req->status.status |= cpu_to_be32(
				STATUS_BLOCK_SRC(
					STATUS_SRC_ORB_FINISHED) |
				STATUS_BLOCK_RESP(
					STATUS_RESP_TRANSPORT_FAILURE) |
				STATUS_BLOCK_DEAD(1) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_UNSPECIFIED_ERROR));

			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);

			sbp_send_status(req);
			sbp_free_request(req);
			return;
		}

		/* check the next_ORB field */
		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
			next_orb = 0;
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED));
		} else {
			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_CONTINUING));
		}

		if (tgt_agent_check_active(agent) && !doorbell) {
			INIT_WORK(&req->work, tgt_agent_process_work);
			queue_work(system_unbound_wq, &req->work);
		} else {
			/* don't process this request, just check next_ORB */
			sbp_free_request(req);
		}

		spin_lock_bh(&agent->lock);
		doorbell = agent->doorbell = false;

		/* check if we should carry on processing */
		if (next_orb)
			agent->orb_pointer = next_orb;
		else
			agent->state = AGENT_STATE_SUSPENDED;
		spin_unlock_bh(&agent->lock);
	}
}

static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *login)
{
	struct sbp_target_agent *agent;
	int ret;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);

	agent->handler.length = 0x20;
	agent->handler.address_callback = tgt_agent_rw;
	agent->handler.callback_data = agent;

	agent->login = login;
	agent->state = AGENT_STATE_RESET;
	INIT_WORK(&agent->work, tgt_agent_fetch_work);
	agent->orb_pointer = 0;
	agent->doorbell = false;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}
/*
 * Simple wrapper around fw_run_transaction that retries the transaction
 * several times in case of failure, with an increasing delay between
 * attempts.
 */
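/*
 * With delay = 5 * attempt * attempt, the sleep after the first failed
 * attempt is 5-10us, rising to 80-160us before the fifth and final attempt
 * (usleep_range is given [delay, 2 * delay] microseconds).
 */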
static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
		int generation, int speed, unsigned long long offset,
		void *payload, size_t length)
{
	int attempt, ret, delay;

	for (attempt = 1; attempt <= 5; attempt++) {
		ret = fw_run_transaction(card, tcode, destination_id,
				generation, speed, offset, payload, length);

		switch (ret) {
		case RCODE_COMPLETE:
		case RCODE_TYPE_ERROR:
		case RCODE_ADDRESS_ERROR:
		case RCODE_GENERATION:
			return ret;

		default:
			delay = 5 * attempt * attempt;
			usleep_range(delay, delay * 2);
		}
	}

	return ret;
}
/*
 * Wrapper around sbp_run_transaction that gets the card, destination,
 * generation and speed out of the request's session.
 */
static int sbp_run_request_transaction(struct sbp_target_request *req,
		int tcode, unsigned long long offset, void *payload,
		size_t length)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	struct fw_card *card;
	int node_id, generation, speed, ret;

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	speed = sess->speed;
	spin_unlock_bh(&sess->lock);

	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
			offset, payload, length);

	fw_card_put(card);

	return ret;
}
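/*
 * Copy the CDB out of the ORB.  SCSI commands longer than the ORB's inline
 * command_block field are completed by reading the remaining bytes from the
 * initiator's memory immediately after the ORB itself.
 */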
static int sbp_fetch_command(struct sbp_target_request *req)
{
	int ret, cmd_len, copy_len;

	cmd_len = scsi_command_size(req->orb.command_block);

	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
	if (!req->cmd_buf)
		return -ENOMEM;

	memcpy(req->cmd_buf, req->orb.command_block,
		min_t(int, cmd_len, sizeof(req->orb.command_block)));

	if (cmd_len > sizeof(req->orb.command_block)) {
		pr_debug("sbp_fetch_command: filling in long command\n");
		copy_len = cmd_len - sizeof(req->orb.command_block);

		ret = sbp_run_request_transaction(req,
				TCODE_READ_BLOCK_REQUEST,
				req->orb_pointer + sizeof(req->orb),
				req->cmd_buf + sizeof(req->orb.command_block),
				copy_len);
		if (ret != RCODE_COMPLETE)
			return -EIO;
	}

	return 0;
}
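/*
 * When the ORB's page-table-present bit is set, data_descriptor points at an
 * array of page table entries rather than at the data buffer itself.  Each
 * entry (as consumed by sbp_rw_data below) carries a 16-bit segment_length
 * and a 48-bit segment base split across segment_base_hi/segment_base_lo.
 */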
static int sbp_fetch_page_table(struct sbp_target_request *req)
{
	int pg_tbl_sz, ret;
	struct sbp_page_table_entry *pg_tbl;

	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
		return 0;

	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
		sizeof(struct sbp_page_table_entry);

	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
	if (!pg_tbl)
		return -ENOMEM;

	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			pg_tbl, pg_tbl_sz);
	if (ret != RCODE_COMPLETE) {
		kfree(pg_tbl);
		return -EIO;
	}

	req->pg_tbl = pg_tbl;
	return 0;
}

static void sbp_calc_data_length_direction(struct sbp_target_request *req,
	u32 *data_len, enum dma_data_direction *data_dir)
{
	int data_size, direction, idx;

	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));

	if (!data_size) {
		*data_len = 0;
		*data_dir = DMA_NONE;
		return;
	}

	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->pg_tbl) {
		*data_len = 0;
		for (idx = 0; idx < data_size; idx++) {
			*data_len += be16_to_cpu(
					req->pg_tbl[idx].segment_length);
		}
	} else {
		*data_len = data_size;
	}
}

static void sbp_handle_command(struct sbp_target_request *req)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	int ret, unpacked_lun;
	u32 data_length;
	enum dma_data_direction data_dir;

	ret = sbp_fetch_command(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
		goto err;
	}

	ret = sbp_fetch_page_table(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
			ret);
		goto err;
	}

	unpacked_lun = req->login->lun->unpacked_lun;
	sbp_calc_data_length_direction(req, &data_length, &data_dir);

	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
			req->orb_pointer, unpacked_lun, data_length, data_dir);

	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
			req->sense_buf, unpacked_lun, data_length,
			MSG_SIMPLE_TAG, data_dir, 0))
		goto err;

	return;

err:
	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(1) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
	sbp_send_status(req);
	sbp_free_request(req);
}

/*
 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
 */
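/*
 * max_payload in the ORB is encoded as a power of two: the per-transaction
 * limit used below is 4 << max_payload bytes (2^(max_payload + 2)), so for
 * example a field value of 8 allows 1024-byte blocks.
 */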
static int sbp_rw_data(struct sbp_target_request *req)
{
	struct sbp_session *sess = req->login->sess;
	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
		generation, num_pte, length, tfr_length,
		rcode = RCODE_COMPLETE;
	struct sbp_page_table_entry *pte;
	unsigned long long offset;
	struct fw_card *card;
	struct sg_mapping_iter iter;

	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
		tcode = TCODE_WRITE_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_FROM_SG;
	} else {
		tcode = TCODE_READ_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_TO_SG;
	}

	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));

	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
	if (pg_size) {
		pr_err("sbp_rw_data: page size ignored\n");
		pg_size = 0x100 << pg_size;
	}

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	spin_unlock_bh(&sess->lock);

	if (req->pg_tbl) {
		pte = req->pg_tbl;
		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));

		offset = 0;
		length = 0;
	} else {
		pte = NULL;
		num_pte = 0;

		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
		length = req->se_cmd.data_length;
	}

	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
		sg_miter_flags);

	while (length || num_pte) {
		if (!length) {
			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
				be32_to_cpu(pte->segment_base_lo);
			length = be16_to_cpu(pte->segment_length);

			pte++;
			num_pte--;
		}

		sg_miter_next(&iter);

		tfr_length = min3(length, max_payload, (int)iter.length);

		/* FIXME: take page_size into account */

		rcode = sbp_run_transaction(card, tcode, node_id,
				generation, speed,
				offset, iter.addr, tfr_length);

		if (rcode != RCODE_COMPLETE)
			break;

		length -= tfr_length;
		offset += tfr_length;
		iter.consumed = tfr_length;
	}

	sg_miter_stop(&iter);
	fw_card_put(card);

	if (rcode == RCODE_COMPLETE) {
		WARN_ON(length != 0);
		return 0;
	} else {
		return -EIO;
	}
}

static int sbp_send_status(struct sbp_target_request *req)
{
	int ret, length;
	struct sbp_login_descriptor *login = req->login;

	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;

	ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
			login->status_fifo_addr, &req->status, length);
	if (ret != RCODE_COMPLETE) {
		pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
		return -EIO;
	}

	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
			req->orb_pointer);

	return 0;
}
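/*
 * Repack fixed-format SCSI sense data from the target core into the SBP-2
 * status block's sense fields: the sfmt/deferred bit and SCSI status byte,
 * validity/sense key bits, ASC/ASCQ, and the information, CDB-dependent,
 * FRU and sense-key-dependent bytes.  Sense bytes up to offset 17 are read,
 * hence the WARN_ON below.
 */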
  1110. static void sbp_sense_mangle(struct sbp_target_request *req)
  1111. {
  1112. struct se_cmd *se_cmd = &req->se_cmd;
  1113. u8 *sense = req->sense_buf;
  1114. u8 *status = req->status.data;
  1115. WARN_ON(se_cmd->scsi_sense_length < 18);
  1116. switch (sense[0] & 0x7f) { /* sfmt */
  1117. case 0x70: /* current, fixed */
  1118. status[0] = 0 << 6;
  1119. break;
  1120. case 0x71: /* deferred, fixed */
  1121. status[0] = 1 << 6;
  1122. break;
  1123. case 0x72: /* current, descriptor */
  1124. case 0x73: /* deferred, descriptor */
  1125. default:
  1126. /*
  1127. * TODO: SBP-3 specifies what we should do with descriptor
  1128. * format sense data
  1129. */
  1130. pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
  1131. sense[0]);
  1132. req->status.status |= cpu_to_be32(
  1133. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  1134. STATUS_BLOCK_DEAD(0) |
  1135. STATUS_BLOCK_LEN(1) |
  1136. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
  1137. return;
  1138. }
  1139. status[0] |= se_cmd->scsi_status & 0x3f;/* status */
  1140. status[1] =
  1141. (sense[0] & 0x80) | /* valid */
  1142. ((sense[2] & 0xe0) >> 1) | /* mark, eom, ili */
  1143. (sense[2] & 0x0f); /* sense_key */
  1144. status[2] = se_cmd->scsi_asc; /* sense_code */
  1145. status[3] = se_cmd->scsi_ascq; /* sense_qualifier */
  1146. /* information */
  1147. status[4] = sense[3];
  1148. status[5] = sense[4];
  1149. status[6] = sense[5];
  1150. status[7] = sense[6];
  1151. /* CDB-dependent */
  1152. status[8] = sense[8];
  1153. status[9] = sense[9];
  1154. status[10] = sense[10];
  1155. status[11] = sense[11];
  1156. /* fru */
  1157. status[12] = sense[14];
  1158. /* sense_key-dependent */
  1159. status[13] = sense[15];
  1160. status[14] = sense[16];
  1161. status[15] = sense[17];
  1162. req->status.status |= cpu_to_be32(
  1163. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  1164. STATUS_BLOCK_DEAD(0) |
  1165. STATUS_BLOCK_LEN(5) |
  1166. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
  1167. }
  1168. static int sbp_send_sense(struct sbp_target_request *req)
  1169. {
  1170. struct se_cmd *se_cmd = &req->se_cmd;
  1171. if (se_cmd->scsi_sense_length) {
  1172. sbp_sense_mangle(req);
  1173. } else {
  1174. req->status.status |= cpu_to_be32(
  1175. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  1176. STATUS_BLOCK_DEAD(0) |
  1177. STATUS_BLOCK_LEN(1) |
  1178. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
  1179. }
  1180. return sbp_send_status(req);
  1181. }
  1182. static void sbp_free_request(struct sbp_target_request *req)
  1183. {
  1184. kfree(req->pg_tbl);
  1185. kfree(req->cmd_buf);
  1186. kfree(req);
  1187. }
static void sbp_mgt_agent_process(struct work_struct *work)
{
	struct sbp_management_agent *agent =
		container_of(work, struct sbp_management_agent, work);
	struct sbp_management_request *req = agent->request;
	int ret;
	int status_data_len = 0;

	/* fetch the ORB from the initiator */
	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		agent->orb_offset, &req->orb, sizeof(req->orb));
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb fetch failed: %x\n", ret);
		goto out;
	}

	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
		sbp2_pointer_to_addr(&req->orb.ptr1),
		sbp2_pointer_to_addr(&req->orb.ptr2),
		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
		sbp2_pointer_to_addr(&req->orb.status_fifo));

	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
		pr_err("mgt_orb bad request\n");
		goto out;
	}

	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
	case MANAGEMENT_ORB_FUNCTION_LOGIN:
		sbp_management_request_login(agent, req, &status_data_len);
		break;
	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
		sbp_management_request_query_logins(agent, req,
				&status_data_len);
		break;
	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
		sbp_management_request_reconnect(agent, req, &status_data_len);
		break;
	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
		pr_notice("SET PASSWORD not implemented\n");
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
		break;
	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
		sbp_management_request_logout(agent, req, &status_data_len);
		break;
	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
		pr_notice("ABORT TASK not implemented\n");
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
		break;
	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
		pr_notice("ABORT TASK SET not implemented\n");
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
		break;
	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
		pr_notice("LOGICAL UNIT RESET not implemented\n");
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
		break;
	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
		pr_notice("TARGET RESET not implemented\n");
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
		break;
	default:
		pr_notice("unknown management function 0x%x\n",
			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
		break;
	}

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
	req->status.orb_low = cpu_to_be32(agent->orb_offset);

	/* write the status block back to the initiator */
	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		sbp2_pointer_to_addr(&req->orb.status_fifo),
		&req->status, 8 + status_data_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb status write failed: %x\n", ret);
		goto out;
	}

out:
	fw_card_put(req->card);
	kfree(req);

	spin_lock_bh(&agent->lock);
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	spin_unlock_bh(&agent->lock);
}
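
/*
 * Address handler for the MANAGEMENT_AGENT register.  An 8-byte block write
 * of an SBP-2 pointer stores the ORB address and queues
 * sbp_mgt_agent_process(); a block read returns the last ORB pointer written.
 * Requests arriving while the agent is busy are answered with
 * RCODE_CONFLICT_ERROR.
 */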
static void sbp_mgt_agent_rw(struct fw_card *card,
	struct fw_request *request, int tcode, int destination, int source,
	int generation, unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_management_agent *agent = callback_data;
	struct sbp2_pointer *ptr = data;
	int rcode = RCODE_ADDRESS_ERROR;

	if (!agent->tport->enable)
		goto out;

	if ((offset != agent->handler.offset) || (length != 8))
		goto out;

	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
		struct sbp_management_request *req;
		int prev_state;

		spin_lock_bh(&agent->lock);
		prev_state = agent->state;
		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
		spin_unlock_bh(&agent->lock);

		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
			pr_notice("ignoring management request while busy\n");
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req) {
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req->card = fw_card_get(card);
		req->generation = generation;
		req->node_addr = source;
		req->speed = fw_get_request_speed(request);

		agent->orb_offset = sbp2_pointer_to_addr(ptr);
		agent->request = req;

		queue_work(system_unbound_wq, &agent->work);
		rcode = RCODE_COMPLETE;
	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
		addr_to_sbp2_pointer(agent->orb_offset, ptr);
		rcode = RCODE_COMPLETE;
	} else {
		rcode = RCODE_TYPE_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}
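
/*
 * Allocate a management agent and expose it on the bus by registering an
 * 8-byte FireWire address handler in sbp_register_region; the resulting CSR
 * offset is what sbp_update_unit_directory() advertises as the
 * MANAGEMENT_AGENT entry in the unit directory.
 */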
static struct sbp_management_agent *sbp_management_agent_register(
		struct sbp_tport *tport)
{
	int ret;
	struct sbp_management_agent *agent;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);
	agent->tport = tport;
	agent->handler.length = 0x08;
	agent->handler.address_callback = sbp_mgt_agent_rw;
	agent->handler.callback_data = agent;
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	INIT_WORK(&agent->work, sbp_mgt_agent_process);
	agent->orb_offset = 0;
	agent->request = NULL;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

static int sbp_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int sbp_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *sbp_get_fabric_name(void)
{
	return "sbp";
}

static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 sbp_get_tag(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 sbp_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}

static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
	struct sbp_nacl *nacl;

	nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL);
	if (!nacl) {
		pr_err("Unable to allocate struct sbp_nacl\n");
		return NULL;
	}

	return &nacl->se_node_acl;
}

static void sbp_release_fabric_acl(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl)
{
	struct sbp_nacl *nacl =
		container_of(se_nacl, struct sbp_nacl, se_node_acl);
	kfree(nacl);
}

static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void sbp_release_cmd(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	sbp_free_request(req);
}

static int sbp_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void sbp_close_session(struct se_session *se_sess)
{
	return;
}

static u32 sbp_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int sbp_write_pending(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	target_execute_cmd(se_cmd);
	return 0;
}

static int sbp_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static u32 sbp_get_task_tag(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	/* only used for printk until we do TMRs */
	return (u32)req->orb_pointer;
}

static int sbp_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	return sbp_send_sense(req);
}

/*
 * Called after command (no data transfer) or after the write (to device)
 * operation is completed
 */
static int sbp_queue_status(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return sbp_send_sense(req);
}

static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}

static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	transport_generic_free_cmd(&req->se_cmd, 0);
	return 1;
}

/*
 * Handlers for Serial Bus Protocol 2/3 (SBP-2 / SBP-3)
 */
static u8 sbp_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	/*
	 * Return an IEEE 1394 SCSI Protocol identifier for loopback
	 * operations.  This is defined in section 7.5.1 Table 362 of spc4r17.
	 */
	return SCSI_PROTOCOL_SBP;
}

static u32 sbp_get_pr_transport_id(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code,
	unsigned char *buf)
{
	int ret;

	/*
	 * Set PROTOCOL IDENTIFIER to 3h for SBP
	 */
	buf[0] = SCSI_PROTOCOL_SBP;
	/*
	 * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
	 * over IEEE 1394
	 */
	ret = hex2bin(&buf[8], se_nacl->initiatorname, 8);
	if (ret < 0)
		pr_debug("sbp transport_id: invalid hex string\n");

	/*
	 * The IEEE 1394 TransportID has a fixed length of 24 bytes.
	 */
	return 24;
}

static u32 sbp_get_pr_transport_id_len(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code)
{
	*format_code = 0;
	/*
	 * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
	 * over IEEE 1394
	 *
	 * The SBP TransportID has a fixed length of 24 bytes.
	 */
	return 24;
}

/*
 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
 */
static char *sbp_parse_pr_out_transport_id(
	struct se_portal_group *se_tpg,
	const char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	/*
	 * Assume FORMAT CODE 00b from spc4r17, 7.5.4.4 TransportID for
	 * initiator ports using SCSI over the SBP Serial SCSI Protocol.
	 *
	 * The TransportID for an IEEE 1394 initiator port has a fixed size
	 * of 24 bytes, and IEEE 1394 does not carry an I_T nexus identifier,
	 * so *port_nexus_ptr is set to NULL.
	 */
	*port_nexus_ptr = NULL;
	*out_tid_len = 24;

	return (char *)&buf[8];
}

static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
	int i, count = 0;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		struct se_lun *se_lun = tpg->tpg_lun_list[i];

		if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
			continue;

		count++;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return count;
}
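
/*
 * Rebuild and re-publish the unit directory in the local node's config ROM.
 * Any previously installed descriptor is removed first; nothing is
 * advertised while the target is disabled or has no TPG.  The directory
 * combines the static template with the management agent offset, unit
 * characteristics, reconnect timeout, one logical_unit_number entry per
 * active LUN, and a trailing unit unique ID leaf holding the target GUID.
 */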
static int sbp_update_unit_directory(struct sbp_tport *tport)
{
	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret, i;
	u32 *data;

	if (tport->unit_directory.data) {
		fw_core_remove_descriptor(&tport->unit_directory);
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	if (!tport->enable || !tport->tpg)
		return 0;

	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);

	/*
	 * Number of entries in the final unit directory:
	 *  - all of those in the template
	 *  - management_agent
	 *  - unit_characteristics
	 *  - reconnect_timeout
	 *  - unit unique ID
	 *  - one for each LUN
	 *
	 *  MUST NOT include leaf or sub-directory entries
	 */
	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;

	if (tport->directory_id != -1)
		num_entries++;

	/* allocate num_entries + 4 for the header and unique ID leaf */
	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* directory_length */
	data[idx++] = num_entries << 16;

	/* directory_id */
	if (tport->directory_id != -1)
		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;

	/* unit directory template */
	memcpy(&data[idx], sbp_unit_directory_template,
			sizeof(sbp_unit_directory_template));
	idx += ARRAY_SIZE(sbp_unit_directory_template);

	/* management_agent */
	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);

	/* unit_characteristics */
	data[idx++] = 0x3a000000 |
		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
		SBP_ORB_FETCH_SIZE;

	/* reconnect_timeout */
	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);

	/* unit unique ID (leaf is just after LUNs) */
	data[idx++] = 0x8d000000 | (num_luns + 1);

	spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		struct se_lun *se_lun = tport->tpg->se_tpg.tpg_lun_list[i];
		struct se_device *dev;
		int type;

		if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
			continue;

		spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);

		dev = se_lun->lun_se_dev;
		type = dev->transport->get_device_type(dev);

		/* logical_unit_number */
		data[idx++] = 0x14000000 |
			((type << 16) & 0x1f0000) |
			(se_lun->unpacked_lun & 0xffff);

		spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
	}
	spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);

	/* unit unique ID leaf */
	data[idx++] = 2 << 16;
	data[idx++] = tport->guid >> 32;
	data[idx++] = tport->guid;

	tport->unit_directory.length = idx;
	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
	tport->unit_directory.data = data;

	ret = fw_core_add_descriptor(&tport->unit_directory);
	if (ret < 0) {
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	return ret;
}
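
/*
 * Parse a WWN given as 16 hex digits (an EUI-64, the name used when creating
 * the tport or an initiator NodeACL in configfs below) into a u64.  A single
 * trailing newline is tolerated; any other character, or a wrong digit
 * count, fails and logs the error code, length and position.
 */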
static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
{
	const char *cp;
	char c, nibble;
	int pos = 0, err;

	*wwn = 0;
	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
		c = *cp;
		if (c == '\n' && cp[1] == '\0')
			continue;
		if (c == '\0') {
			err = 2;
			if (pos != 16)
				goto fail;
			return cp - name;
		}
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		*wwn = (*wwn << 4) | nibble;
		pos++;
	}
	err = 4;
fail:
	printk(KERN_INFO "err %u len %zu pos %u\n",
			err, cp - name, pos);
	return -1;
}

static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
{
	return snprintf(buf, len, "%016llx", wwn);
}

static struct se_node_acl *sbp_make_nodeacl(
		struct se_portal_group *se_tpg,
		struct config_group *group,
		const char *name)
{
	struct se_node_acl *se_nacl, *se_nacl_new;
	struct sbp_nacl *nacl;
	u64 guid = 0;
	u32 nexus_depth = 1;

	if (sbp_parse_wwn(name, &guid) < 0)
		return ERR_PTR(-EINVAL);

	se_nacl_new = sbp_alloc_fabric_acl(se_tpg);
	if (!se_nacl_new)
		return ERR_PTR(-ENOMEM);

	/*
	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
	 * when converting a NodeACL from demo mode -> explicit
	 */
	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
			name, nexus_depth);
	if (IS_ERR(se_nacl)) {
		sbp_release_fabric_acl(se_tpg, se_nacl_new);
		return se_nacl;
	}

	nacl = container_of(se_nacl, struct sbp_nacl, se_node_acl);
	nacl->guid = guid;
	sbp_format_wwn(nacl->iport_name, SBP_NAMELEN, guid);

	return se_nacl;
}

static void sbp_drop_nodeacl(struct se_node_acl *se_acl)
{
	struct sbp_nacl *nacl =
		container_of(se_acl, struct sbp_nacl, se_node_acl);

	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
	kfree(nacl);
}

static int sbp_post_link_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return sbp_update_unit_directory(tpg->tport);
}

static void sbp_pre_unlink_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	int ret;

	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
		tport->enable = 0;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		pr_err("unlink LUN: failed to update unit directory\n");
}
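
/*
 * configfs: create the (single) target portal group under a tport.  The TPG
 * name must be of the form "tpgt_<n>"; attribute defaults are set here and
 * the management agent is registered before the TPG is handed to the core.
 */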
static struct se_portal_group *sbp_make_tpg(
		struct se_wwn *wwn,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);
	struct sbp_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	if (tport->tpg) {
		pr_err("Only one TPG per Unit is possible.\n");
		return ERR_PTR(-EBUSY);
	}

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct sbp_tpg\n");
		return ERR_PTR(-ENOMEM);
	}

	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;
	tport->tpg = tpg;

	/* default attribute values */
	tport->enable = 0;
	tport->directory_id = -1;
	tport->mgt_orb_timeout = 15;
	tport->max_reconnect_timeout = 5;
	tport->max_logins_per_lun = 1;

	tport->mgt_agt = sbp_management_agent_register(tport);
	if (IS_ERR(tport->mgt_agt)) {
		ret = PTR_ERR(tport->mgt_agt);
		goto out_free_tpg;
	}

	ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
			&tpg->se_tpg, (void *)tpg,
			TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0)
		goto out_unreg_mgt_agt;

	return &tpg->se_tpg;

out_unreg_mgt_agt:
	sbp_management_agent_unregister(tport->mgt_agt);
out_free_tpg:
	tport->tpg = NULL;
	kfree(tpg);
	return ERR_PTR(ret);
}

static void sbp_drop_tpg(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	core_tpg_deregister(se_tpg);
	sbp_management_agent_unregister(tport->mgt_agt);
	tport->tpg = NULL;
	kfree(tpg);
}

static struct se_wwn *sbp_make_tport(
		struct target_fabric_configfs *tf,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport;
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return ERR_PTR(-EINVAL);

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct sbp_tport\n");
		return ERR_PTR(-ENOMEM);
	}

	tport->guid = guid;
	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);

	return &tport->tport_wwn;
}

static void sbp_drop_tport(struct se_wwn *wwn)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	kfree(tport);
}

static ssize_t sbp_wwn_show_attr_version(
		struct target_fabric_configfs *tf,
		char *page)
{
	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
}

TF_WWN_ATTR_RO(sbp, version);

static struct configfs_attribute *sbp_wwn_attrs[] = {
	&sbp_wwn_version.attr,
	NULL,
};

static ssize_t sbp_tpg_show_directory_id(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	if (tport->directory_id == -1)
		return sprintf(page, "implicit\n");
	else
		return sprintf(page, "%06x\n", tport->directory_id);
}

static ssize_t sbp_tpg_store_directory_id(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (tport->enable) {
		pr_err("Cannot change the directory_id on an active target.\n");
		return -EBUSY;
	}

	if (strstr(page, "implicit") == page) {
		tport->directory_id = -1;
	} else {
		if (kstrtoul(page, 16, &val) < 0)
			return -EINVAL;
		if (val > 0xffffff)
			return -EINVAL;

		tport->directory_id = val;
	}

	return count;
}
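
/*
 * The "enable" TPG attribute switches the target on and off: enabling
 * requires at least one mapped LUN, disabling is refused while sessions
 * exist, and either transition republishes the unit directory.  As an
 * illustrative (not verbatim) configfs example, assuming a tport named by
 * its EUI-64 and a TPG named tpgt_1:
 *
 *   echo 1 > /sys/kernel/config/target/sbp/<eui-64>/tpgt_1/enable
 */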
static ssize_t sbp_tpg_show_enable(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->enable);
}

static ssize_t sbp_tpg_store_enable(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val != 0) && (val != 1))
		return -EINVAL;

	if (tport->enable == val)
		return count;

	if (val) {
		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
			pr_err("Cannot enable a target with no LUNs!\n");
			return -EINVAL;
		}
	} else {
		/* XXX: force-shutdown sessions instead? */
		spin_lock_bh(&se_tpg->session_lock);
		if (!list_empty(&se_tpg->tpg_sess_list)) {
			spin_unlock_bh(&se_tpg->session_lock);
			return -EBUSY;
		}
		spin_unlock_bh(&se_tpg->session_lock);
	}

	tport->enable = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0) {
		pr_err("Could not update Config ROM\n");
		return ret;
	}

	return count;
}

TF_TPG_BASE_ATTR(sbp, directory_id, S_IRUGO | S_IWUSR);
TF_TPG_BASE_ATTR(sbp, enable, S_IRUGO | S_IWUSR);

static struct configfs_attribute *sbp_tpg_base_attrs[] = {
	&sbp_tpg_directory_id.attr,
	&sbp_tpg_enable.attr,
	NULL,
};

static ssize_t sbp_tpg_attrib_show_mgt_orb_timeout(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
}

static ssize_t sbp_tpg_attrib_store_mgt_orb_timeout(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	if (tport->mgt_orb_timeout == val)
		return count;

	tport->mgt_orb_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_show_max_reconnect_timeout(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
}

static ssize_t sbp_tpg_attrib_store_max_reconnect_timeout(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 32767))
		return -EINVAL;

	if (tport->max_reconnect_timeout == val)
		return count;

	tport->max_reconnect_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_show_max_logins_per_lun(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_logins_per_lun);
}

static ssize_t sbp_tpg_attrib_store_max_logins_per_lun(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	/* XXX: also check against current count? */
	tport->max_logins_per_lun = val;

	return count;
}

TF_TPG_ATTRIB_ATTR(sbp, mgt_orb_timeout, S_IRUGO | S_IWUSR);
TF_TPG_ATTRIB_ATTR(sbp, max_reconnect_timeout, S_IRUGO | S_IWUSR);
TF_TPG_ATTRIB_ATTR(sbp, max_logins_per_lun, S_IRUGO | S_IWUSR);

static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
	&sbp_tpg_attrib_mgt_orb_timeout.attr,
	&sbp_tpg_attrib_max_reconnect_timeout.attr,
	&sbp_tpg_attrib_max_logins_per_lun.attr,
	NULL,
};
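
/*
 * Fabric ops table wiring the callbacks above into the target core: the
 * WWN/TPG/NodeACL configfs hooks, the I/O completion callbacks
 * (write_pending, queue_data_in, queue_status) and the LUN link/unlink hooks
 * that keep the unit directory in sync.
 */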
static struct target_core_fabric_ops sbp_ops = {
	.get_fabric_name = sbp_get_fabric_name,
	.get_fabric_proto_ident = sbp_get_fabric_proto_ident,
	.tpg_get_wwn = sbp_get_fabric_wwn,
	.tpg_get_tag = sbp_get_tag,
	.tpg_get_default_depth = sbp_get_default_depth,
	.tpg_get_pr_transport_id = sbp_get_pr_transport_id,
	.tpg_get_pr_transport_id_len = sbp_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id = sbp_parse_pr_out_transport_id,
	.tpg_check_demo_mode = sbp_check_true,
	.tpg_check_demo_mode_cache = sbp_check_true,
	.tpg_check_demo_mode_write_protect = sbp_check_false,
	.tpg_check_prod_mode_write_protect = sbp_check_false,
	.tpg_alloc_fabric_acl = sbp_alloc_fabric_acl,
	.tpg_release_fabric_acl = sbp_release_fabric_acl,
	.tpg_get_inst_index = sbp_tpg_get_inst_index,
	.release_cmd = sbp_release_cmd,
	.shutdown_session = sbp_shutdown_session,
	.close_session = sbp_close_session,
	.sess_get_index = sbp_sess_get_index,
	.write_pending = sbp_write_pending,
	.write_pending_status = sbp_write_pending_status,
	.set_default_node_attributes = sbp_set_default_node_attrs,
	.get_task_tag = sbp_get_task_tag,
	.get_cmd_state = sbp_get_cmd_state,
	.queue_data_in = sbp_queue_data_in,
	.queue_status = sbp_queue_status,
	.queue_tm_rsp = sbp_queue_tm_rsp,
	.check_stop_free = sbp_check_stop_free,
	.fabric_make_wwn = sbp_make_tport,
	.fabric_drop_wwn = sbp_drop_tport,
	.fabric_make_tpg = sbp_make_tpg,
	.fabric_drop_tpg = sbp_drop_tpg,
	.fabric_post_link = sbp_post_link_lun,
	.fabric_pre_unlink = sbp_pre_unlink_lun,
	.fabric_make_np = NULL,
	.fabric_drop_np = NULL,
	.fabric_make_nodeacl = sbp_make_nodeacl,
	.fabric_drop_nodeacl = sbp_drop_nodeacl,
};
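
/*
 * Module setup: register the "sbp" fabric with the target core, attach the
 * WWN, TPG base and TPG attribute configfs attribute lists defined above,
 * and stash the handle in sbp_fabric_configfs for use by sbp_make_tpg() and
 * for deregistration on module exit.
 */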
static int sbp_register_configfs(void)
{
	struct target_fabric_configfs *fabric;
	int ret;

	fabric = target_fabric_configfs_init(THIS_MODULE, "sbp");
	if (IS_ERR(fabric)) {
		pr_err("target_fabric_configfs_init() failed\n");
		return PTR_ERR(fabric);
	}

	fabric->tf_ops = sbp_ops;

	/*
	 * Setup default attribute lists for various fabric->tf_cit_tmpl
	 */
	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;

	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() failed for SBP\n");
		return ret;
	}

	sbp_fabric_configfs = fabric;

	return 0;
}

static void sbp_deregister_configfs(void)
{
	if (!sbp_fabric_configfs)
		return;

	target_fabric_configfs_deregister(sbp_fabric_configfs);
	sbp_fabric_configfs = NULL;
}

static int __init sbp_init(void)
{
	int ret;

	ret = sbp_register_configfs();
	if (ret < 0)
		return ret;

	return 0;
}

static void __exit sbp_exit(void)
{
	sbp_deregister_configfs();
}

MODULE_DESCRIPTION("FireWire SBP fabric driver");
MODULE_LICENSE("GPL");
module_init(sbp_init);
module_exit(sbp_exit);