ib_srp.c
/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) PFX fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
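
/*
 * Allocate an SRP information unit (IU): the host-memory buffer plus
 * the DMA mapping that lets the HCA read or write it.  Returns NULL on
 * allocation or mapping failure; srp_free_iu() undoes both steps.
 */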
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}
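
/*
 * Create the per-target IB resources: one completion queue for
 * receives, one for sends, and a reliable-connected (RC) queue pair
 * that uses them.  On any failure the resources created so far are
 * torn down in reverse order.
 */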
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
				       srp_recv_completion, NULL, target,
				       SRP_RQ_SIZE, 0);
	if (IS_ERR(target->recv_cq)) {
		ret = PTR_ERR(target->recv_cq);
		goto err;
	}

	target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
				       srp_send_completion, NULL, target,
				       SRP_SQ_SIZE, 0);
	if (IS_ERR(target->send_cq)) {
		ret = PTR_ERR(target->send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = target->send_cq;
	init_attr->recv_cq             = target->recv_cq;

	target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err_qp;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(target->qp);

err_send_cq:
	ib_destroy_cq(target->send_cq);

err_recv_cq:
	ib_destroy_cq(target->recv_cq);

err:
	kfree(init_attr);
	return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}
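
/*
 * Path record lookup: srp_lookup_path() issues an asynchronous query
 * to the subnet administrator and sleeps on target->done;
 * srp_path_rec_completion() runs in the SA client's context, stores
 * the result, and wakes the waiter.
 */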
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID	|
						   IB_SA_PATH_REC_DGID		|
						   IB_SA_PATH_REC_SGID		|
						   IB_SA_PATH_REC_NUMB_PATH	|
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}
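
/*
 * Build and send the CM REQ that carries the SRP_LOGIN_REQ as private
 * data.  The port identifier layout depends on the I/O class the
 * target reports (see the comment in the function body), and the
 * Topspin/Cisco workaround rewrites the initiator port ID entirely.
 */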
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path               = &target->path;
	req->param.alternate_path             = NULL;
	req->param.service_id                 = target->service_id;
	req->param.qp_num                     = target->qp->qp_num;
	req->param.qp_type                    = target->qp->qp_type;
	req->param.private_data               = &req->priv;
	req->param.private_data_len           = sizeof req->priv;
	req->param.flow_control               = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn              &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources        = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = 7;
	req->param.rnr_retry_count            = 7;
	req->param.max_cm_retries             = 15;

	req->priv.opcode        = SRP_LOGIN_REQ;
	req->priv.tag           = 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM DREQ failed\n");
		return;
	}
	wait_for_completion(&target->done);
}

static bool srp_change_state(struct srp_target_port *target,
			     enum srp_target_state old,
			     enum srp_target_state new)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state == old) {
		target->state = new;
		changed = true;
	}
	spin_unlock_irq(&target->lock);
	return changed;
}

static void srp_free_req_data(struct srp_target_port *target)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct srp_request *req;
	int i;

	for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
		kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, work);

	if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
		return;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	srp_free_req_data(target);
	scsi_host_put(target->scsi_host);
}
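
/*
 * Connect to the target, retrying as directed by the CM: a port or
 * DLID redirect REJ restarts the login (after a fresh path lookup in
 * the port-redirect case), and a stale-connection REJ is retried up
 * to three times with a new CM ID before giving up.
 */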
static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct ib_pool_fmr **pfmr;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	pfmr = req->fmr_list;
	while (req->nfmr--)
		ib_fmr_pool_unmap(*pfmr++);

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

static void srp_remove_req(struct srp_target_port *target,
			   struct srp_request *req, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(req->scmnd, target, req);
	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_lim_delta;
	req->scmnd = NULL;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
	req->scmnd->result = DID_RESET << 16;
	req->scmnd->scsi_done(req->scmnd);
	srp_remove_req(target, req, 0);
}
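
/*
 * Reconnect sequence: disconnect, create a fresh CM ID, reset and
 * re-initialize the QP, drain both CQs, fail any outstanding requests
 * with DID_RESET, rebuild the free-TX list, and then log in again.
 * If anything fails the target is marked dead and its removal is
 * deferred to a workqueue, because this runs in the SCSI error-handler
 * context where scsi_remove_host() would deadlock.
 */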
static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_qp_attr qp_attr;
	struct ib_wc wc;
	int i, ret;

	if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
		return -EAGAIN;

	srp_disconnect_target(target);

	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	ret = srp_new_cm_id(target);
	if (ret)
		goto err;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
		; /* nothing */
	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
		; /* nothing */

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd)
			srp_reset_req(target, req);
	}

	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	target->qp_in_error = 0;
	ret = srp_connect_target(target);
	if (ret)
		goto err;

	if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
		ret = -EAGAIN;

	return ret;

err:
	shost_printk(KERN_ERR, target->scsi_host,
		     PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we
	 * are in the context of the SCSI error handler now, which
	 * will deadlock if we call scsi_remove_host().
	 *
	 * Schedule our work inside the lock to avoid a race with
	 * the flush_scheduled_work() in srp_remove_one().
	 */
	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work);
		queue_work(ib_wq, &target->work);
	}
	spin_unlock_irq(&target->lock);

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (!state->npages)
		return 0;

	if (state->npages == 1) {
		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
			     target->rkey);
		state->npages = state->fmr_len = 0;
		return 0;
	}

	fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nfmr++;

	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
	state->npages = state->fmr_len = 0;
	return 0;
}

static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg    = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr  = dma_addr;
}
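
/*
 * Map one scatterlist entry.  In SRP_MAP_NO_FMR mode the entry is
 * simply described with the global rkey.  Otherwise the entry is
 * merged into the FMR being built page by page; an entry that starts
 * at a sub-page offset or exceeds the FMR size limit closes out the
 * current FMR and falls back to a plain descriptor for that entry.
 */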
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_target_port *target,
			    struct scatterlist *sg, int sg_index,
			    int use_fmr)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (use_fmr == SRP_MAP_NO_FMR) {
		/* Once we're in direct map mode for a request, we don't
		 * go back to FMR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/* If we start at an offset into the FMR page, don't merge into
	 * the current FMR. Finish it out, and use the kernel's MR for this
	 * sg entry. This is to avoid potential bugs on some SRP targets
	 * that were never quite defined, but went away when the initiator
	 * avoided using FMR on such page fragments.
	 */
	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
		ret = srp_map_finish_fmr(state, target);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/* If this is the first sg to go into the FMR, save our position.
	 * We need to know the first unmapped entry, its index, and the
	 * first unmapped address within that entry to be able to restart
	 * mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		if (state->npages == SRP_FMR_SIZE) {
			ret = srp_map_finish_fmr(state, target);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->fmr_page_size);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr;
		state->fmr_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/* If the last entry of the FMR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->fmr_page_size) {
		ret = srp_map_finish_fmr(state, target);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}
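
/*
 * Map the data buffer of a SCSI command into an SRP data descriptor.
 * A single mapped entry yields a direct descriptor inside the CMD IU;
 * multiple entries build an indirect descriptor table, using FMR to
 * merge entries where possible and backtracking to plain descriptors
 * if FMR mapping fails.  Returns the resulting IU length, or a
 * negative errno.
 */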
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
	int i, len, nents, count, use_fmr;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nfmr = 0;
		goto map_complete;
	}

	/* We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries with FMR as we
	 * can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc	= req->indirect_desc;
	state.pages	= req->map_page;
	state.next_fmr	= req->fmr_list;

	use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
			/* FMR mapping failed, so backtrack to the first
			 * unmapped entry and continue on without using FMR.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state.unmapped_sg;
			i = state.unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state.unmapped_addr - dma_addr);
			dma_addr = state.unmapped_addr;
			use_fmr = SRP_MAP_NO_FMR;
			srp_map_desc(&state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
		goto backtrack;

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	req->nfmr = state.nfmr;
	if (state.ndesc == 1) {
		/* FMR mapping was able to collapse this to one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&iu->list, &target->free_tx);
	if (iu_type != SRP_IU_RSP)
		++target->req_lim;
	spin_unlock_irqrestore(&target->lock, flags);
}

/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_iu_type iu_type)
{
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(target->send_cq, target);

	if (list_empty(&target->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (target->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--target->req_lim;
	}

	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

static int srp_post_send(struct srp_target_port *target,
			 struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(target->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(target->qp, &wr, &bad_wr);
}
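
/*
 * Process an SRP_RSP information unit.  Task-management responses only
 * update the credit count and wake the waiter; command responses carry
 * status, optional sense data, and residual counts back into the
 * matching struct scsi_cmnd before it is completed.
 */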
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&target->lock, flags);

		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			target->tsk_mgmt_status = rsp->data[3];
		complete(&target->tsk_mgmt_done);
	} else {
		req = &target->req_ring[rsp->tag];
		scmnd = req->scmnd;
		if (!scmnd) {
			/* Bail out rather than dereference a NULL command. */
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);
			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}
static int srp_response_common(struct srp_target_port *target, s32 req_delta,
			       void *rsp, int len)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(target, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(target, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_target_port *target,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_target_port *target,
				struct srp_aer_req *req)
{
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(target, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(target, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed receive status %d\n",
				     wc.status);
			target->qp_in_error = 1;
			break;
		}

		srp_handle_recv(target, &wc);
	}
}

static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	struct srp_iu *iu;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed send status %d\n",
				     wc.status);
			target->qp_in_error = 1;
			break;
		}

		iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
		list_add(&iu->list, &target->free_tx);
	}
}
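
/*
 * Queue a SCSI command: grab a TX IU and a free request slot, build
 * the SRP_CMD IU, map the data buffer, and post the send.  Any failure
 * unwinds the partial work and returns SCSI_MLQUEUE_HOST_BUSY so the
 * mid-layer retries later.
 */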
  1039. static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
  1040. {
  1041. struct srp_target_port *target = host_to_target(shost);
  1042. struct srp_request *req;
  1043. struct srp_iu *iu;
  1044. struct srp_cmd *cmd;
  1045. struct ib_device *dev;
  1046. unsigned long flags;
  1047. int len;
  1048. if (target->state == SRP_TARGET_CONNECTING)
  1049. goto err;
  1050. if (target->state == SRP_TARGET_DEAD ||
  1051. target->state == SRP_TARGET_REMOVED) {
  1052. scmnd->result = DID_BAD_TARGET << 16;
  1053. scmnd->scsi_done(scmnd);
  1054. return 0;
  1055. }
  1056. spin_lock_irqsave(&target->lock, flags);
  1057. iu = __srp_get_tx_iu(target, SRP_IU_CMD);
  1058. if (!iu)
  1059. goto err_unlock;
  1060. req = list_first_entry(&target->free_reqs, struct srp_request, list);
  1061. list_del(&req->list);
  1062. spin_unlock_irqrestore(&target->lock, flags);
  1063. dev = target->srp_host->srp_dev->dev;
  1064. ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
  1065. DMA_TO_DEVICE);
  1066. scmnd->result = 0;
  1067. scmnd->host_scribble = (void *) req;
  1068. cmd = iu->buf;
  1069. memset(cmd, 0, sizeof *cmd);
  1070. cmd->opcode = SRP_CMD;
  1071. cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
  1072. cmd->tag = req->index;
  1073. memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
  1074. req->scmnd = scmnd;
  1075. req->cmd = iu;
  1076. len = srp_map_data(scmnd, target, req);
  1077. if (len < 0) {
  1078. shost_printk(KERN_ERR, target->scsi_host,
  1079. PFX "Failed to map data\n");
  1080. goto err_iu;
  1081. }
  1082. ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
  1083. DMA_TO_DEVICE);
  1084. if (srp_post_send(target, iu, len)) {
  1085. shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
  1086. goto err_unmap;
  1087. }
  1088. return 0;
  1089. err_unmap:
  1090. srp_unmap_data(scmnd, target, req);
  1091. err_iu:
  1092. srp_put_tx_iu(target, iu, SRP_IU_CMD);
  1093. spin_lock_irqsave(&target->lock, flags);
  1094. list_add(&req->list, &target->free_reqs);
  1095. err_unlock:
  1096. spin_unlock_irqrestore(&target->lock, flags);
  1097. err:
  1098. return SCSI_MLQUEUE_HOST_BUSY;
  1099. }
  1100. static int srp_alloc_iu_bufs(struct srp_target_port *target)
  1101. {
  1102. int i;
  1103. for (i = 0; i < SRP_RQ_SIZE; ++i) {
  1104. target->rx_ring[i] = srp_alloc_iu(target->srp_host,
  1105. target->max_ti_iu_len,
  1106. GFP_KERNEL, DMA_FROM_DEVICE);
  1107. if (!target->rx_ring[i])
  1108. goto err;
  1109. }
  1110. for (i = 0; i < SRP_SQ_SIZE; ++i) {
  1111. target->tx_ring[i] = srp_alloc_iu(target->srp_host,
  1112. target->max_iu_len,
  1113. GFP_KERNEL, DMA_TO_DEVICE);
  1114. if (!target->tx_ring[i])
  1115. goto err;
  1116. list_add(&target->tx_ring[i]->list, &target->free_tx);
  1117. }
  1118. return 0;
  1119. err:
  1120. for (i = 0; i < SRP_RQ_SIZE; ++i) {
  1121. srp_free_iu(target->srp_host, target->rx_ring[i]);
  1122. target->rx_ring[i] = NULL;
  1123. }
  1124. for (i = 0; i < SRP_SQ_SIZE; ++i) {
  1125. srp_free_iu(target->srp_host, target->tx_ring[i]);
  1126. target->tx_ring[i] = NULL;
  1127. }
  1128. return -ENOMEM;
  1129. }
  1130. static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
  1131. struct srp_login_rsp *lrsp,
  1132. struct srp_target_port *target)
  1133. {
  1134. struct ib_qp_attr *qp_attr = NULL;
  1135. int attr_mask = 0;
  1136. int ret;
  1137. int i;
  1138. if (lrsp->opcode == SRP_LOGIN_RSP) {
  1139. target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
  1140. target->req_lim = be32_to_cpu(lrsp->req_lim_delta);
  1141. /*
  1142. * Reserve credits for task management so we don't
  1143. * bounce requests back to the SCSI mid-layer.
  1144. */
  1145. target->scsi_host->can_queue
  1146. = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
  1147. target->scsi_host->can_queue);
  1148. } else {
  1149. shost_printk(KERN_WARNING, target->scsi_host,
  1150. PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
  1151. ret = -ECONNRESET;
  1152. goto error;
  1153. }
  1154. if (!target->rx_ring[0]) {
  1155. ret = srp_alloc_iu_bufs(target);
  1156. if (ret)
  1157. goto error;
  1158. }
  1159. ret = -ENOMEM;
  1160. qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
  1161. if (!qp_attr)
  1162. goto error;
  1163. qp_attr->qp_state = IB_QPS_RTR;
  1164. ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
  1165. if (ret)
  1166. goto error_free;
  1167. ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
  1168. if (ret)
  1169. goto error_free;
  1170. for (i = 0; i < SRP_RQ_SIZE; i++) {
  1171. struct srp_iu *iu = target->rx_ring[i];
  1172. ret = srp_post_recv(target, iu);
  1173. if (ret)
  1174. goto error_free;
  1175. }
  1176. qp_attr->qp_state = IB_QPS_RTS;
  1177. ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
  1178. if (ret)
  1179. goto error_free;
  1180. ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
  1181. if (ret)
  1182. goto error_free;
  1183. ret = ib_send_cm_rtu(cm_id, NULL, 0);
  1184. error_free:
  1185. kfree(qp_attr);
  1186. error:
  1187. target->status = ret;
  1188. }
  1189. static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
  1190. struct ib_cm_event *event,
  1191. struct srp_target_port *target)
  1192. {
  1193. struct Scsi_Host *shost = target->scsi_host;
  1194. struct ib_class_port_info *cpi;
  1195. int opcode;
  1196. switch (event->param.rej_rcvd.reason) {
  1197. case IB_CM_REJ_PORT_CM_REDIRECT:
  1198. cpi = event->param.rej_rcvd.ari;
  1199. target->path.dlid = cpi->redirect_lid;
  1200. target->path.pkey = cpi->redirect_pkey;
  1201. cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
  1202. memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);
  1203. target->status = target->path.dlid ?
  1204. SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
  1205. break;
  1206. case IB_CM_REJ_PORT_REDIRECT:
  1207. if (srp_target_is_topspin(target)) {
  1208. /*
  1209. * Topspin/Cisco SRP gateways incorrectly send
  1210. * reject reason code 25 when they mean 24
  1211. * (port redirect).
  1212. */
  1213. memcpy(target->path.dgid.raw,
  1214. event->param.rej_rcvd.ari, 16);
  1215. shost_printk(KERN_DEBUG, shost,
  1216. PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
  1217. (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
  1218. (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
  1219. target->status = SRP_PORT_REDIRECT;
  1220. } else {
  1221. shost_printk(KERN_WARNING, shost,
  1222. " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
  1223. target->status = -ECONNRESET;
  1224. }
  1225. break;
  1226. case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
  1227. shost_printk(KERN_WARNING, shost,
  1228. " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
  1229. target->status = -ECONNRESET;
  1230. break;
  1231. case IB_CM_REJ_CONSUMER_DEFINED:
  1232. opcode = *(u8 *) event->private_data;
  1233. if (opcode == SRP_LOGIN_REJ) {
  1234. struct srp_login_rej *rej = event->private_data;
  1235. u32 reason = be32_to_cpu(rej->reason);
  1236. if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
  1237. shost_printk(KERN_WARNING, shost,
  1238. PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
  1239. else
  1240. shost_printk(KERN_WARNING, shost,
  1241. PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
  1242. } else
  1243. shost_printk(KERN_WARNING, shost,
  1244. " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
  1245. " opcode 0x%02x\n", opcode);
  1246. target->status = -ECONNRESET;
  1247. break;
  1248. case IB_CM_REJ_STALE_CONN:
  1249. shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
  1250. target->status = SRP_STALE_CONN;
  1251. break;
  1252. default:
  1253. shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
  1254. event->param.rej_rcvd.reason);
  1255. target->status = -ECONNRESET;
  1256. }
  1257. }

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, target);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	return 0;
}
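
/*
 * Build an SRP_TSK_MGMT information unit for @func (e.g. ABORT TASK or
 * LUN RESET), post it on the send queue and wait up to
 * SRP_ABORT_TIMEOUT_MS for the target's response.  Returns 0 on success
 * and -1 on any failure, which the SCSI error-handler callers below map
 * to SUCCESS/FAILED.
 */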

static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     u64 req_tag, unsigned int lun, u8 func)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -1;

	init_completion(&target->tsk_mgmt_done);

	spin_lock_irq(&target->lock);
	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&target->lock);

	if (!iu)
		return -1;

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode = SRP_TSK_MGMT;
	tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
	tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag = req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
		return -1;
	}

	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}
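
/*
 * SCSI midlayer error-handler callbacks.  The midlayer escalates from
 * aborting a single command, to resetting one LUN, to resetting the
 * whole host (which here means reconnecting the transport); each
 * callback returns SUCCESS or FAILED as required by the SCSI EH state
 * machine.
 */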

static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	int ret = SUCCESS;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req || target->qp_in_error)
		return FAILED;
	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK))
		return FAILED;

	if (req->scmnd) {
		if (!target->tsk_mgmt_status) {
			srp_remove_req(target, req, 0);
			scmnd->result = DID_ABORT << 16;
		} else
			ret = FAILED;
	}

	return ret;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int i;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	if (target->qp_in_error)
		return FAILED;
	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (target->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd && req->scmnd->device == scmnd->device)
			srp_reset_req(target, req);
	}

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}
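
/*
 * Read-only sysfs attributes exported for each SCSI host.  They mirror
 * the login parameters of the target port plus a few runtime counters
 * (req_lim, zero_req_lim) that are useful when debugging flow control.
 */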

static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}

static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};

static struct scsi_host_template srp_template = {
	.module = THIS_MODULE,
	.name = "InfiniBand SRP initiator",
	.proc_name = DRV_NAME,
	.info = srp_target_info,
	.queuecommand = srp_queuecommand,
	.eh_abort_handler = srp_abort,
	.eh_device_reset_handler = srp_reset_device,
	.eh_host_reset_handler = srp_reset_host,
	.sg_tablesize = SRP_DEF_SG_TABLESIZE,
	.can_queue = SRP_CMD_SQ_SIZE,
	.this_id = -1,
	.cmd_per_lun = SRP_CMD_SQ_SIZE,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = srp_host_attrs
};
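
/*
 * Register a newly connected target port with the SCSI midlayer and the
 * SRP transport class, mark it live, and kick off a LUN scan.
 */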

static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}

static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name = "infiniband_srp",
	.dev_release = srp_release_dev
};

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
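
/*
 * For example, using made-up identifiers and a hypothetical host named
 * srp-mlx4_0-1 (see dev_set_name() in srp_add_port() below), a target
 * could be added with:
 *
 *	echo id_ext=200100A0B8000000,ioc_guid=00A0B80200402BD4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0000000000000066 > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 *
 * The dgid value must be exactly 32 hex digits; the other values are
 * parsed as hex as well.
 */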

enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_ERR,			NULL			}
};
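
/*
 * Parse a comma-separated add_target string into @target.  The options
 * in SRP_OPT_ALL are mandatory; anything else keeps the defaults set up
 * by srp_create_target() before this function is called.
 */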

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			memcpy(target->orig_dgid, target->path.dgid.raw, 16);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			target->path.service_id = target->service_id;
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token)) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
			    token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}
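
/*
 * add_target store method: allocate a SCSI host plus target port, parse
 * the option string, set up the per-request DMA state, and connect to
 * the target.  On success @count is returned, so each write creates
 * exactly one target port.
 */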

static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct ib_device *ibdev = host->srp_dev->dev;
	dma_addr_t dma_addr;
	int i, ret;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id = 1;
	target_host->max_lun = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class = SRP_REV16A_IB_IO_CLASS;
	target->scsi_host = target_host;
	target->srp_host = host;
	target->lkey = host->srp_dev->mr->lkey;
	target->rkey = host->srp_dev->mr->rkey;
	target->cmd_sg_cnt = cmd_sg_entries;
	target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg = allow_ext_sg;

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	spin_lock_init(&target->lock);
	INIT_LIST_HEAD(&target->free_tx);
	INIT_LIST_HEAD(&target->free_reqs);
	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];

		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
					GFP_KERNEL);
		req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
					GFP_KERNEL);
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->fmr_list || !req->map_page || !req->indirect_desc)
			goto err_free_mem;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto err_free_mem;

		req->indirect_dma_addr = dma_addr;
		req->index = i;
		list_add_tail(&req->list, &target->free_reqs);
	}

	ib_query_gid(ibdev, host->port, 0, &target->path.sgid);

	shost_printk(KERN_DEBUG, target->scsi_host, PFX
		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
		     "service_id %016llx dgid %pI6\n",
		     (unsigned long long) be64_to_cpu(target->id_ext),
		     (unsigned long long) be64_to_cpu(target->ioc_guid),
		     be16_to_cpu(target->path.pkey),
		     (unsigned long long) be64_to_cpu(target->service_id),
		     target->path.dgid.raw);

	ret = srp_create_target_ib(target);
	if (ret)
		goto err_free_mem;

	ret = srp_new_cm_id(target);
	if (ret)
		goto err_free_ib;

	target->qp_in_error = 0;
	ret = srp_connect_target(target);
	if (ret) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free_ib:
	srp_free_target_ib(target);

err_free_mem:
	srp_free_req_data(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
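
/*
 * Create the srp_host that represents one port of an HCA and expose its
 * add_target, ibdev and port attributes in sysfs.
 */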

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}
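
/*
 * IB client "add" callback: set up per-device state (PD, DMA MR and, if
 * the HCA supports it, an FMR pool) and register an srp_host for every
 * physical port.  The FMR pool is optional; when it cannot be created,
 * targets fall back to indirect descriptor tables only, as handled in
 * srp_create_target().
 */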

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int max_pages_per_fmr, fmr_page_shift, s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	fmr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size = 1 << fmr_page_shift;
	srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1);
	srp_dev->fmr_max_size = srp_dev->fmr_page_size * SRP_FMR_SIZE;

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	for (max_pages_per_fmr = SRP_FMR_SIZE;
	     max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
	     max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
		memset(&fmr_param, 0, sizeof fmr_param);
		fmr_param.pool_size = SRP_FMR_POOL_SIZE;
		fmr_param.dirty_watermark = SRP_FMR_DIRTY_SIZE;
		fmr_param.cache = 1;
		fmr_param.max_pages_per_fmr = max_pages_per_fmr;
		fmr_param.page_shift = fmr_page_shift;
		fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

		srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
		if (!IS_ERR(srp_dev->fmr_pool))
			break;
	}

	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
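
/*
 * IB client "remove" callback: tear down in roughly the reverse order of
 * srp_add_one().  Targets are first marked SRP_TARGET_REMOVED under the
 * appropriate locks so that no new commands or reconnect attempts are
 * started while the SCSI hosts are being removed.
 */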

static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;

	srp_dev = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list) {
			spin_lock_irq(&target->lock);
			target->state = SRP_TARGET_REMOVED;
			spin_unlock_irq(&target->lock);
		}
		spin_unlock(&host->target_lock);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_workqueue(ib_wq);

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			srp_del_scsi_host_attr(target->scsi_host);
			srp_remove_host(target->scsi_host);
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			srp_free_req_data(target);
			scsi_host_put(target->scsi_host);
		}

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);

	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
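
/*
 * No SRP transport callbacks are implemented in this version of the
 * driver; attaching the (empty) template is still required so that
 * srp_rport_add() in srp_add_target() can hook targets into the SRP
 * transport class.
 */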

static struct srp_function_template ib_srp_transport_functions = {
};

static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);