/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) PFX fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}
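
/*
 * Nonzero when the target's IOC GUID begins with a known Topspin or Cisco
 * OUI and the topspin_workarounds module parameter is set; used below to
 * gate the Topspin/Cisco-specific login and redirect workarounds.
 */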
static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}

static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (target->qp)
		ib_destroy_qp(target->qp);
	if (target->recv_cq)
		ib_destroy_cq(target->recv_cq);
	if (target->send_cq)
		ib_destroy_cq(target->send_cq);

	target->qp = qp;
	target->recv_cq = recv_cq;
	target->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	target->qp = NULL;
	target->send_cq = target->recv_cq = NULL;

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID	|
						   IB_SA_PATH_REC_DGID		|
						   IB_SA_PATH_REC_SGID		|
						   IB_SA_PATH_REC_NUMB_PATH	|
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}

static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &target->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = target->qp->qp_num;
	req->param.qp_type	      = target->qp->qp_type;
	req->param.private_data	      = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = 7;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
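	/*
	 * Illustrative example (values made up): for id_ext 0x0001 and
	 * ioc_guid 0x0002, a target conforming to the published spec gets
	 * target_port_id = id_ext bytes followed by ioc_guid bytes, while a
	 * rev-10 target gets the same two 8-byte halves in swapped order.
	 */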
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id, &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(system_long_wq, &target->remove_work);

	return changed;
}

static bool srp_change_conn_state(struct srp_target_port *target,
				  bool connected)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->connected != connected) {
		target->connected = connected;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	if (srp_change_conn_state(target, false)) {
		/* XXX should send SRP_I_LOGOUT request */

		if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct srp_request *req;
	int i;

	for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
		kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_disconnect_target(target);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	srp_free_req_data(target);
	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	srp_remove_target(target);
}

static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	WARN_ON_ONCE(target->connected);

	target->qp_in_error = false;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			srp_change_conn_state(target, true);
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct ib_pool_fmr **pfmr;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	pfmr = req->fmr_list;
	while (req->nfmr--)
		ib_fmr_pool_unmap(*pfmr++);

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @target: SRP target port.
 * @req: SRP request.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
				       struct srp_request *req,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	if (!scmnd) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else if (req->scmnd == scmnd) {
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&target->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 */
static void srp_free_req(struct srp_target_port *target,
			 struct srp_request *req, struct scsi_cmnd *scmnd,
			 s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, target, req);

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_lim_delta;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
	struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);

	if (scmnd) {
		srp_free_req(target, req, scmnd, 0);
		scmnd->result = DID_RESET << 16;
		scmnd->scsi_done(scmnd);
	}
}

static int srp_reconnect_target(struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	int i, ret;

	if (target->state != SRP_TARGET_LIVE)
		return -EAGAIN;

	scsi_target_block(&shost->shost_gendev);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	ret = srp_new_cm_id(target);
	if (ret)
		goto unblock;

	ret = srp_create_target_ib(target);
	if (ret)
		goto unblock;

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd)
			srp_reset_req(target, req);
	}

	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	ret = srp_connect_target(target);

unblock:
	scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
			    SDEV_TRANSPORT_OFFLINE);

	if (ret)
		goto err;

	shost_printk(KERN_INFO, target->scsi_host, PFX "reconnect succeeded\n");

	return ret;

err:
	shost_printk(KERN_ERR, target->scsi_host,
		     PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we
	 * are in the context of the SCSI error handler now, which
	 * will deadlock if we call scsi_remove_host().
	 */
	srp_queue_remove_work(target);

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (!state->npages)
		return 0;

	if (state->npages == 1) {
		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
			     target->rkey);
		state->npages = state->fmr_len = 0;
		return 0;
	}

	fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nfmr++;

	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
	state->npages = state->fmr_len = 0;
	return 0;
}

static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr = dma_addr;
}

static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_target_port *target,
			    struct scatterlist *sg, int sg_index,
			    int use_fmr)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (use_fmr == SRP_MAP_NO_FMR) {
		/* Once we're in direct map mode for a request, we don't
		 * go back to FMR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/* If we start at an offset into the FMR page, don't merge into
	 * the current FMR. Finish it out, and use the kernel's MR for this
	 * sg entry. This is to avoid potential bugs on some SRP targets
	 * that were never quite defined, but went away when the initiator
	 * avoided using FMR on such page fragments.
	 */
	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
		ret = srp_map_finish_fmr(state, target);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/* If this is the first sg to go into the FMR, save our position.
	 * We need to know the first unmapped entry, its index, and the
	 * first unmapped address within that entry to be able to restart
	 * mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		if (state->npages == SRP_FMR_SIZE) {
			ret = srp_map_finish_fmr(state, target);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->fmr_page_size);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr;
		state->fmr_len += len;
		dma_addr += len;
		dma_len -= len;
	}
	/* If the last entry of the FMR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
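	/*
	 * Example with illustrative numbers: given a 4 KiB fmr_page_size, an
	 * s/g entry whose final chunk covers only 1 KiB leaves the FMR ending
	 * mid-page, so it is flushed here and the next entry starts a fresh
	 * mapping rather than merging across the partial page.
	 */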
	ret = 0;
	if (len != dev->fmr_page_size) {
		ret = srp_map_finish_fmr(state, target);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}

static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
	int i, len, nents, count, use_fmr;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nfmr = 0;
		goto map_complete;
	}

	/* We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries with FMR as we
	 * can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc	= req->indirect_desc;
	state.pages	= req->map_page;
	state.next_fmr	= req->fmr_list;

	use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
			/* FMR mapping failed, so backtrack to the first
			 * unmapped entry and continue on without using FMR.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state.unmapped_sg;
			i = state.unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state.unmapped_addr - dma_addr);
			dma_addr = state.unmapped_addr;
			use_fmr = SRP_MAP_NO_FMR;
			srp_map_desc(&state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
		goto backtrack;

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	req->nfmr = state.nfmr;
	if (state.ndesc == 1) {
		/* FMR mapping was able to collapse this to one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&iu->list, &target->free_tx);
	if (iu_type != SRP_IU_RSP)
		++target->req_lim;
	spin_unlock_irqrestore(&target->lock, flags);
}

/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
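/*
 * For example (illustrative scenario): with rsv == SRP_TSK_MGMT_SQ_SIZE,
 * __srp_get_tx_iu() refuses a new SRP_IU_CMD once req_lim drops to that
 * reserve, while an SRP_IU_TSK_MGMT (rsv == 0) can still be allocated
 * until req_lim reaches zero.
 */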
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_iu_type iu_type)
{
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(target->send_cq, target);

	if (list_empty(&target->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (target->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--target->req_lim;
	}

	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

static int srp_post_send(struct srp_target_port *target,
			 struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(target->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(target->qp, &wr, &bad_wr);
}

static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&target->lock, flags);

		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			target->tsk_mgmt_status = rsp->data[3];
		complete(&target->tsk_mgmt_done);
	} else {
		req = &target->req_ring[rsp->tag];
		scmnd = srp_claim_req(target, req, NULL);
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);

			spin_lock_irqsave(&target->lock, flags);
			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&target->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		srp_free_req(target, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}

static int srp_response_common(struct srp_target_port *target, s32 req_delta,
			       void *rsp, int len)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(target, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(target, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_target_port *target,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_target_port *target,
				struct srp_aer_req *req)
{
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(target, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(target, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

static void srp_handle_qp_err(enum ib_wc_status wc_status,
			      enum ib_wc_opcode wc_opcode,
			      struct srp_target_port *target)
{
	if (target->connected && !target->qp_in_error) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "failed %s status %d\n",
			     wc_opcode & IB_WC_RECV ? "receive" : "send",
			     wc_status);
	}
	target->qp_in_error = true;
}

static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			srp_handle_recv(target, &wc);
		} else {
			srp_handle_qp_err(wc.status, wc.opcode, target);
		}
	}
}

static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	struct srp_iu *iu;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
			list_add(&iu->list, &target->free_tx);
		} else {
			srp_handle_qp_err(wc.status, wc.opcode, target);
		}
	}
}

static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	int len;

	spin_lock_irqsave(&target->lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
	if (!iu)
		goto err_unlock;

	req = list_first_entry(&target->free_reqs, struct srp_request, list);
	list_del(&req->list);
	spin_unlock_irqrestore(&target->lock, flags);

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->result        = 0;
	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd = scmnd;
	req->cmd   = iu;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data\n");
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(target, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err_iu:
	srp_put_tx_iu(target, iu, SRP_IU_CMD);

	spin_lock_irqsave(&target->lock, flags);
	list_add(&req->list, &target->free_reqs);

err_unlock:
	spin_unlock_irqrestore(&target->lock, flags);

	return SCSI_MLQUEUE_HOST_BUSY;
}

static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;

		list_add(&target->tx_ring[i]->list, &target->free_tx);
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}

static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
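	/*
	 * Worked example (illustrative values): a Local ACK Timeout of 19
	 * gives T_tr = 4096 ns * 2^19, roughly 2.1 s; with retry_cnt == 7 the
	 * worst case is 7 * 4 * 2.1 s, about 60 s, so the value returned
	 * below corresponds to roughly 61 seconds.
	 */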
  1199. T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
  1200. max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
  1201. do_div(max_compl_time_ms, NSEC_PER_MSEC);
  1202. rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
  1203. return rq_tmo_jiffies;
  1204. }
  1205. static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
  1206. struct srp_login_rsp *lrsp,
  1207. struct srp_target_port *target)
  1208. {
  1209. struct ib_qp_attr *qp_attr = NULL;
  1210. int attr_mask = 0;
  1211. int ret;
  1212. int i;
  1213. if (lrsp->opcode == SRP_LOGIN_RSP) {
  1214. target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
  1215. target->req_lim = be32_to_cpu(lrsp->req_lim_delta);
  1216. /*
  1217. * Reserve credits for task management so we don't
  1218. * bounce requests back to the SCSI mid-layer.
  1219. */
  1220. target->scsi_host->can_queue
  1221. = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
  1222. target->scsi_host->can_queue);
  1223. } else {
  1224. shost_printk(KERN_WARNING, target->scsi_host,
  1225. PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
  1226. ret = -ECONNRESET;
  1227. goto error;
  1228. }
  1229. if (!target->rx_ring[0]) {
  1230. ret = srp_alloc_iu_bufs(target);
  1231. if (ret)
  1232. goto error;
  1233. }
  1234. ret = -ENOMEM;
  1235. qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
  1236. if (!qp_attr)
  1237. goto error;
  1238. qp_attr->qp_state = IB_QPS_RTR;
  1239. ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
  1240. if (ret)
  1241. goto error_free;
  1242. ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
  1243. if (ret)
  1244. goto error_free;
  1245. for (i = 0; i < SRP_RQ_SIZE; i++) {
  1246. struct srp_iu *iu = target->rx_ring[i];
  1247. ret = srp_post_recv(target, iu);
  1248. if (ret)
  1249. goto error_free;
  1250. }
  1251. qp_attr->qp_state = IB_QPS_RTS;
  1252. ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
  1253. if (ret)
  1254. goto error_free;
  1255. target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
  1256. ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
  1257. if (ret)
  1258. goto error_free;
  1259. ret = ib_send_cm_rtu(cm_id, NULL, 0);
  1260. error_free:
  1261. kfree(qp_attr);
  1262. error:
  1263. target->status = ret;
  1264. }
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event,
                               struct srp_target_port *target)
{
        struct Scsi_Host *shost = target->scsi_host;
        struct ib_class_port_info *cpi;
        int opcode;

        switch (event->param.rej_rcvd.reason) {
        case IB_CM_REJ_PORT_CM_REDIRECT:
                cpi = event->param.rej_rcvd.ari;
                target->path.dlid = cpi->redirect_lid;
                target->path.pkey = cpi->redirect_pkey;
                cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
                memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

                target->status = target->path.dlid ?
                        SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
                break;

        case IB_CM_REJ_PORT_REDIRECT:
                if (srp_target_is_topspin(target)) {
                        /*
                         * Topspin/Cisco SRP gateways incorrectly send
                         * reject reason code 25 when they mean 24
                         * (port redirect).
                         */
                        memcpy(target->path.dgid.raw,
                               event->param.rej_rcvd.ari, 16);

                        shost_printk(KERN_DEBUG, shost,
                                     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
                                     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
                                     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

                        target->status = SRP_PORT_REDIRECT;
                } else {
                        shost_printk(KERN_WARNING, shost,
                                     " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
                        target->status = -ECONNRESET;
                }
                break;

        case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
                shost_printk(KERN_WARNING, shost,
                             " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
                target->status = -ECONNRESET;
                break;

        case IB_CM_REJ_CONSUMER_DEFINED:
                opcode = *(u8 *) event->private_data;
                if (opcode == SRP_LOGIN_REJ) {
                        struct srp_login_rej *rej = event->private_data;
                        u32 reason = be32_to_cpu(rej->reason);

                        if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
                                shost_printk(KERN_WARNING, shost,
                                             PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
                        else
                                shost_printk(KERN_WARNING, shost,
                                             PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
                } else
                        shost_printk(KERN_WARNING, shost,
                                     " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
                                     " opcode 0x%02x\n", opcode);
                target->status = -ECONNRESET;
                break;

        case IB_CM_REJ_STALE_CONN:
                shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
                target->status = SRP_STALE_CONN;
                break;

        default:
                shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
                             event->param.rej_rcvd.reason);
                target->status = -ECONNRESET;
        }
}

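/*
 * Connection manager callback: dispatch CM events to the REP/REJ handlers
 * above and complete target->done for those events the caller waits on.
 */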
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        struct srp_target_port *target = cm_id->context;
        int comp = 0;

        switch (event->event) {
        case IB_CM_REQ_ERROR:
                shost_printk(KERN_DEBUG, target->scsi_host,
                             PFX "Sending CM REQ failed\n");
                comp = 1;
                target->status = -ECONNRESET;
                break;

        case IB_CM_REP_RECEIVED:
                comp = 1;
                srp_cm_rep_handler(cm_id, event->private_data, target);
                break;

        case IB_CM_REJ_RECEIVED:
                shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
                comp = 1;

                srp_cm_rej_handler(cm_id, event, target);
                break;

        case IB_CM_DREQ_RECEIVED:
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "DREQ received - connection closed\n");
                srp_change_conn_state(target, false);
                if (ib_send_cm_drep(cm_id, NULL, 0))
                        shost_printk(KERN_ERR, target->scsi_host,
                                     PFX "Sending CM DREP failed\n");
                break;

        case IB_CM_TIMEWAIT_EXIT:
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "connection closed\n");

                target->status = 0;
                break;

        case IB_CM_MRA_RECEIVED:
        case IB_CM_DREQ_ERROR:
        case IB_CM_DREP_RECEIVED:
                break;

        default:
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Unhandled CM event %d\n", event->event);
                break;
        }

        if (comp)
                complete(&target->done);

        return 0;
}

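/*
 * Build and post an SRP task management IU (e.g. ABORT TASK or LUN RESET)
 * and wait up to SRP_ABORT_TIMEOUT_MS for the response.
 */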
static int srp_send_tsk_mgmt(struct srp_target_port *target,
                             u64 req_tag, unsigned int lun, u8 func)
{
        struct ib_device *dev = target->srp_host->srp_dev->dev;
        struct srp_iu *iu;
        struct srp_tsk_mgmt *tsk_mgmt;

        init_completion(&target->tsk_mgmt_done);

        spin_lock_irq(&target->lock);
        iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
        spin_unlock_irq(&target->lock);

        if (!iu)
                return -1;

        ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
                                   DMA_TO_DEVICE);
        tsk_mgmt = iu->buf;
        memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

        tsk_mgmt->opcode        = SRP_TSK_MGMT;
        tsk_mgmt->lun           = cpu_to_be64((u64) lun << 48);
        tsk_mgmt->tag           = req_tag | SRP_TAG_TSK_MGMT;
        tsk_mgmt->tsk_mgmt_func = func;
        tsk_mgmt->task_tag      = req_tag;

        ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
                                      DMA_TO_DEVICE);
        if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
                srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
                return -1;
        }

        if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
                                         msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
                return -1;

        return 0;
}

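/*
 * SCSI error handling callbacks: srp_abort() aborts a single command,
 * srp_reset_device() resets one LUN and srp_reset_host() reconnects the
 * whole target port.
 */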
static int srp_abort(struct scsi_cmnd *scmnd)
{
        struct srp_target_port *target = host_to_target(scmnd->device->host);
        struct srp_request *req = (struct srp_request *) scmnd->host_scribble;

        shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

        if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
                return FAILED;
        srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
                          SRP_TSK_ABORT_TASK);
        srp_free_req(target, req, scmnd, 0);
        scmnd->result = DID_ABORT << 16;
        scmnd->scsi_done(scmnd);

        return SUCCESS;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
        struct srp_target_port *target = host_to_target(scmnd->device->host);
        int i;

        shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

        if (target->qp_in_error)
                return FAILED;
        if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
                              SRP_TSK_LUN_RESET))
                return FAILED;
        if (target->tsk_mgmt_status)
                return FAILED;

        for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
                struct srp_request *req = &target->req_ring[i];
                if (req->scmnd && req->scmnd->device == scmnd->device)
                        srp_reset_req(target, req);
        }

        return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
        struct srp_target_port *target = host_to_target(scmnd->device->host);
        int ret = FAILED;

        shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

        if (!srp_reconnect_target(target))
                ret = SUCCESS;

        return ret;
}

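/*
 * Make sure that the block layer request timeout for disks is at least as
 * large as the worst-case QP completion time computed in srp_compute_rq_tmo().
 */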
static int srp_slave_configure(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        struct srp_target_port *target = host_to_target(shost);
        struct request_queue *q = sdev->request_queue;
        unsigned long timeout;

        if (sdev->type == TYPE_DISK) {
                timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
                blk_queue_rq_timeout(q, timeout);
        }

        return 0;
}

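/*
 * Read-only per-target sysfs attributes, exported below via srp_host_attrs[].
 */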
static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct srp_target_port *target = host_to_target(class_to_shost(dev));

        return sprintf(buf, "0x%016llx\n",
                       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct srp_target_port *target = host_to_target(class_to_shost(dev));

        return sprintf(buf, "0x%016llx\n",
                       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct srp_target_port *target = host_to_target(class_to_shost(dev));

        return sprintf(buf, "0x%016llx\n",
                       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct srp_target_port *target = host_to_target(class_to_shost(dev));

        return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct srp_target_port *target = host_to_target(class_to_shost(dev));

        return sprintf(buf, "%pI6\n", target->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct srp_target_port *target = host_to_target(class_to_shost(dev));

        return sprintf(buf, "%pI6\n", target->orig_dgid);
}

static ssize_t show_req_lim(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct srp_target_port *target = host_to_target(class_to_shost(dev));

        return sprintf(buf, "%d\n", target->req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct srp_target_port *target = host_to_target(class_to_shost(dev));

        return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        struct srp_target_port *target = host_to_target(class_to_shost(dev));

        return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct srp_target_port *target = host_to_target(class_to_shost(dev));

        return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct srp_target_port *target = host_to_target(class_to_shost(dev));

        return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct srp_target_port *target = host_to_target(class_to_shost(dev));

        return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}

static DEVICE_ATTR(id_ext,          S_IRUGO, show_id_ext,          NULL);
static DEVICE_ATTR(ioc_guid,        S_IRUGO, show_ioc_guid,        NULL);
static DEVICE_ATTR(service_id,      S_IRUGO, show_service_id,      NULL);
static DEVICE_ATTR(pkey,            S_IRUGO, show_pkey,            NULL);
static DEVICE_ATTR(dgid,            S_IRUGO, show_dgid,            NULL);
static DEVICE_ATTR(orig_dgid,       S_IRUGO, show_orig_dgid,       NULL);
static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,    NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);

static struct device_attribute *srp_host_attrs[] = {
        &dev_attr_id_ext,
        &dev_attr_ioc_guid,
        &dev_attr_service_id,
        &dev_attr_pkey,
        &dev_attr_dgid,
        &dev_attr_orig_dgid,
        &dev_attr_req_lim,
        &dev_attr_zero_req_lim,
        &dev_attr_local_ib_port,
        &dev_attr_local_ib_device,
        &dev_attr_cmd_sg_entries,
        &dev_attr_allow_ext_sg,
        NULL
};

static struct scsi_host_template srp_template = {
        .module                  = THIS_MODULE,
        .name                    = "InfiniBand SRP initiator",
        .proc_name               = DRV_NAME,
        .slave_configure         = srp_slave_configure,
        .info                    = srp_target_info,
        .queuecommand            = srp_queuecommand,
        .eh_abort_handler        = srp_abort,
        .eh_device_reset_handler = srp_reset_device,
        .eh_host_reset_handler   = srp_reset_host,
        .sg_tablesize            = SRP_DEF_SG_TABLESIZE,
        .can_queue               = SRP_CMD_SQ_SIZE,
        .this_id                 = -1,
        .cmd_per_lun             = SRP_CMD_SQ_SIZE,
        .use_clustering          = ENABLE_CLUSTERING,
        .shost_attrs             = srp_host_attrs
};

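/*
 * Register a newly connected target port with the SCSI midlayer and the
 * SRP transport class and kick off a scan of its LUNs.
 */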
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
        struct srp_rport_identifiers ids;
        struct srp_rport *rport;

        sprintf(target->target_name, "SRP.T10:%016llX",
                (unsigned long long) be64_to_cpu(target->id_ext));

        if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
                return -ENODEV;

        memcpy(ids.port_id, &target->id_ext, 8);
        memcpy(ids.port_id + 8, &target->ioc_guid, 8);
        ids.roles = SRP_RPORT_ROLE_TARGET;
        rport = srp_rport_add(target->scsi_host, &ids);
        if (IS_ERR(rport)) {
                scsi_remove_host(target->scsi_host);
                return PTR_ERR(rport);
        }

        spin_lock(&host->target_lock);
        list_add_tail(&target->list, &host->target_list);
        spin_unlock(&host->target_lock);

        target->state = SRP_TARGET_LIVE;
        target->connected = false;

        scsi_scan_target(&target->scsi_host->shost_gendev,
                         0, target->scsi_id, SCAN_WILD_CARD, 0);

        return 0;
}

static void srp_release_dev(struct device *dev)
{
        struct srp_host *host =
                container_of(dev, struct srp_host, dev);

        complete(&host->released);
}

static struct class srp_class = {
        .name        = "infiniband_srp",
        .dev_release = srp_release_dev
};

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
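 *
 * For example (the GUID/GID values below are purely illustrative; the host
 * directory name depends on the local HCA and port, cf. srp_add_port()):
 *
 *     echo "id_ext=0002c9030005cc96,ioc_guid=0002c9030005cc96,dgid=fe800000000000000002c9030005cc97,pkey=ffff,service_id=0002c9030005cc96" > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target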
 */
enum {
        SRP_OPT_ERR             = 0,
        SRP_OPT_ID_EXT          = 1 << 0,
        SRP_OPT_IOC_GUID        = 1 << 1,
        SRP_OPT_DGID            = 1 << 2,
        SRP_OPT_PKEY            = 1 << 3,
        SRP_OPT_SERVICE_ID      = 1 << 4,
        SRP_OPT_MAX_SECT        = 1 << 5,
        SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
        SRP_OPT_IO_CLASS        = 1 << 7,
        SRP_OPT_INITIATOR_EXT   = 1 << 8,
        SRP_OPT_CMD_SG_ENTRIES  = 1 << 9,
        SRP_OPT_ALLOW_EXT_SG    = 1 << 10,
        SRP_OPT_SG_TABLESIZE    = 1 << 11,
        SRP_OPT_ALL             = (SRP_OPT_ID_EXT     |
                                   SRP_OPT_IOC_GUID   |
                                   SRP_OPT_DGID       |
                                   SRP_OPT_PKEY       |
                                   SRP_OPT_SERVICE_ID),
};

static const match_table_t srp_opt_tokens = {
        { SRP_OPT_ID_EXT,          "id_ext=%s"          },
        { SRP_OPT_IOC_GUID,        "ioc_guid=%s"        },
        { SRP_OPT_DGID,            "dgid=%s"            },
        { SRP_OPT_PKEY,            "pkey=%x"            },
        { SRP_OPT_SERVICE_ID,      "service_id=%s"      },
        { SRP_OPT_MAX_SECT,        "max_sect=%d"        },
        { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
        { SRP_OPT_IO_CLASS,        "io_class=%x"        },
        { SRP_OPT_INITIATOR_EXT,   "initiator_ext=%s"   },
        { SRP_OPT_CMD_SG_ENTRIES,  "cmd_sg_entries=%u"  },
        { SRP_OPT_ALLOW_EXT_SG,    "allow_ext_sg=%u"    },
        { SRP_OPT_SG_TABLESIZE,    "sg_tablesize=%u"    },
        { SRP_OPT_ERR,             NULL                 }
};

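/*
 * Parse a comma-separated add_target string into *target. Returns 0 if all
 * mandatory parameters (SRP_OPT_ALL) were present and valid, and a negative
 * error code otherwise.
 */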
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
        char *options, *sep_opt;
        char *p;
        char dgid[3];
        substring_t args[MAX_OPT_ARGS];
        int opt_mask = 0;
        int token;
        int ret = -EINVAL;
        int i;

        options = kstrdup(buf, GFP_KERNEL);
        if (!options)
                return -ENOMEM;

        sep_opt = options;
        while ((p = strsep(&sep_opt, ",")) != NULL) {
                if (!*p)
                        continue;

                token = match_token(p, srp_opt_tokens, args);
                opt_mask |= token;

                switch (token) {
                case SRP_OPT_ID_EXT:
                        p = match_strdup(args);
                        if (!p) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
                        kfree(p);
                        break;

                case SRP_OPT_IOC_GUID:
                        p = match_strdup(args);
                        if (!p) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
                        kfree(p);
                        break;

                case SRP_OPT_DGID:
                        p = match_strdup(args);
                        if (!p) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        if (strlen(p) != 32) {
                                pr_warn("bad dest GID parameter '%s'\n", p);
                                kfree(p);
                                goto out;
                        }

                        for (i = 0; i < 16; ++i) {
                                strlcpy(dgid, p + i * 2, 3);
                                target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
                        }
                        kfree(p);
                        memcpy(target->orig_dgid, target->path.dgid.raw, 16);
                        break;

                case SRP_OPT_PKEY:
                        if (match_hex(args, &token)) {
                                pr_warn("bad P_Key parameter '%s'\n", p);
                                goto out;
                        }
                        target->path.pkey = cpu_to_be16(token);
                        break;

                case SRP_OPT_SERVICE_ID:
                        p = match_strdup(args);
                        if (!p) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
                        target->path.service_id = target->service_id;
                        kfree(p);
                        break;

                case SRP_OPT_MAX_SECT:
                        if (match_int(args, &token)) {
                                pr_warn("bad max sect parameter '%s'\n", p);
                                goto out;
                        }
                        target->scsi_host->max_sectors = token;
                        break;

                case SRP_OPT_MAX_CMD_PER_LUN:
                        if (match_int(args, &token)) {
                                pr_warn("bad max cmd_per_lun parameter '%s'\n",
                                        p);
                                goto out;
                        }
                        target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
                        break;

                case SRP_OPT_IO_CLASS:
                        if (match_hex(args, &token)) {
                                pr_warn("bad IO class parameter '%s'\n", p);
                                goto out;
                        }
                        if (token != SRP_REV10_IB_IO_CLASS &&
                            token != SRP_REV16A_IB_IO_CLASS) {
                                pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
                                        token, SRP_REV10_IB_IO_CLASS,
                                        SRP_REV16A_IB_IO_CLASS);
                                goto out;
                        }
                        target->io_class = token;
                        break;

                case SRP_OPT_INITIATOR_EXT:
                        p = match_strdup(args);
                        if (!p) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
                        kfree(p);
                        break;

                case SRP_OPT_CMD_SG_ENTRIES:
                        if (match_int(args, &token) || token < 1 || token > 255) {
                                pr_warn("bad max cmd_sg_entries parameter '%s'\n",
                                        p);
                                goto out;
                        }
                        target->cmd_sg_cnt = token;
                        break;

                case SRP_OPT_ALLOW_EXT_SG:
                        if (match_int(args, &token)) {
                                pr_warn("bad allow_ext_sg parameter '%s'\n", p);
                                goto out;
                        }
                        target->allow_ext_sg = !!token;
                        break;

                case SRP_OPT_SG_TABLESIZE:
                        if (match_int(args, &token) || token < 1 ||
                            token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
                                pr_warn("bad max sg_tablesize parameter '%s'\n",
                                        p);
                                goto out;
                        }
                        target->sg_tablesize = token;
                        break;

                default:
                        pr_warn("unknown parameter or missing value '%s' in target creation request\n",
                                p);
                        goto out;
                }
        }

        if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
                ret = 0;
        else
                for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
                        if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
                            !(srp_opt_tokens[i].token & opt_mask))
                                pr_warn("target creation request is missing parameter '%s'\n",
                                        srp_opt_tokens[i].pattern);

out:
        kfree(options);
        return ret;
}

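/*
 * add_target store method: allocate a SCSI host and target port, parse the
 * user-supplied parameters, allocate per-request resources and connect to
 * the remote SRP target.
 */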
static ssize_t srp_create_target(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
{
        struct srp_host *host =
                container_of(dev, struct srp_host, dev);
        struct Scsi_Host *target_host;
        struct srp_target_port *target;
        struct ib_device *ibdev = host->srp_dev->dev;
        dma_addr_t dma_addr;
        int i, ret;

        target_host = scsi_host_alloc(&srp_template,
                                      sizeof (struct srp_target_port));
        if (!target_host)
                return -ENOMEM;

        target_host->transportt  = ib_srp_transport_template;
        target_host->max_channel = 0;
        target_host->max_id      = 1;
        target_host->max_lun     = SRP_MAX_LUN;
        target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

        target = host_to_target(target_host);

        target->io_class     = SRP_REV16A_IB_IO_CLASS;
        target->scsi_host    = target_host;
        target->srp_host     = host;
        target->lkey         = host->srp_dev->mr->lkey;
        target->rkey         = host->srp_dev->mr->rkey;
        target->cmd_sg_cnt   = cmd_sg_entries;
        target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
        target->allow_ext_sg = allow_ext_sg;

        ret = srp_parse_options(buf, target);
        if (ret)
                goto err;

        if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
            target->cmd_sg_cnt < target->sg_tablesize) {
                pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
                target->sg_tablesize = target->cmd_sg_cnt;
        }

        target_host->sg_tablesize = target->sg_tablesize;
        target->indirect_size = target->sg_tablesize *
                                sizeof (struct srp_direct_buf);
        target->max_iu_len = sizeof (struct srp_cmd) +
                             sizeof (struct srp_indirect_buf) +
                             target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

        INIT_WORK(&target->remove_work, srp_remove_work);
        spin_lock_init(&target->lock);
        INIT_LIST_HEAD(&target->free_tx);
        INIT_LIST_HEAD(&target->free_reqs);
        for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
                struct srp_request *req = &target->req_ring[i];

                req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
                                        GFP_KERNEL);
                req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
                                        GFP_KERNEL);
                req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
                if (!req->fmr_list || !req->map_page || !req->indirect_desc) {
                        /* Without this, a failed allocation would return 0. */
                        ret = -ENOMEM;
                        goto err_free_mem;
                }

                dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
                                             target->indirect_size,
                                             DMA_TO_DEVICE);
                if (ib_dma_mapping_error(ibdev, dma_addr)) {
                        ret = -ENOMEM;
                        goto err_free_mem;
                }

                req->indirect_dma_addr = dma_addr;
                req->index = i;
                list_add_tail(&req->list, &target->free_reqs);
        }

        ib_query_gid(ibdev, host->port, 0, &target->path.sgid);

        shost_printk(KERN_DEBUG, target->scsi_host, PFX
                     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
                     "service_id %016llx dgid %pI6\n",
                     (unsigned long long) be64_to_cpu(target->id_ext),
                     (unsigned long long) be64_to_cpu(target->ioc_guid),
                     be16_to_cpu(target->path.pkey),
                     (unsigned long long) be64_to_cpu(target->service_id),
                     target->path.dgid.raw);

        ret = srp_create_target_ib(target);
        if (ret)
                goto err_free_mem;

        ret = srp_new_cm_id(target);
        if (ret)
                goto err_free_ib;

        ret = srp_connect_target(target);
        if (ret) {
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "Connection failed\n");
                goto err_cm_id;
        }

        ret = srp_add_target(host, target);
        if (ret)
                goto err_disconnect;

        return count;

err_disconnect:
        srp_disconnect_target(target);

err_cm_id:
        ib_destroy_cm_id(target->cm_id);

err_free_ib:
        srp_free_target_ib(target);

err_free_mem:
        srp_free_req_data(target);

err:
        scsi_host_put(target_host);

        return ret;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct srp_host *host = container_of(dev, struct srp_host, dev);

        return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct srp_host *host = container_of(dev, struct srp_host, dev);

        return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

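/*
 * Allocate and register one struct srp_host (a per-HCA-port sysfs device
 * under the infiniband_srp class) together with its sysfs attributes.
 */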
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
        struct srp_host *host;

        host = kzalloc(sizeof *host, GFP_KERNEL);
        if (!host)
                return NULL;

        INIT_LIST_HEAD(&host->target_list);
        spin_lock_init(&host->target_lock);
        init_completion(&host->released);
        host->srp_dev = device;
        host->port = port;

        host->dev.class = &srp_class;
        host->dev.parent = device->dev->dma_device;
        dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

        if (device_register(&host->dev))
                goto free_host;
        if (device_create_file(&host->dev, &dev_attr_add_target))
                goto err_class;
        if (device_create_file(&host->dev, &dev_attr_ibdev))
                goto err_class;
        if (device_create_file(&host->dev, &dev_attr_port))
                goto err_class;

        return host;

err_class:
        device_unregister(&host->dev);

free_host:
        kfree(host);

        return NULL;
}

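/*
 * IB client add callback: query the HCA, set up a PD, a DMA MR and (if
 * supported) an FMR pool, and register an srp_host for every physical port.
 */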
static void srp_add_one(struct ib_device *device)
{
        struct srp_device *srp_dev;
        struct ib_device_attr *dev_attr;
        struct ib_fmr_pool_param fmr_param;
        struct srp_host *host;
        int max_pages_per_fmr, fmr_page_shift, s, e, p;

        dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
        if (!dev_attr)
                return;

        if (ib_query_device(device, dev_attr)) {
                pr_warn("Query device failed for %s\n", device->name);
                goto free_attr;
        }

        srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
        if (!srp_dev)
                goto free_attr;

        /*
         * Use the smallest page size supported by the HCA, down to a
         * minimum of 4096 bytes. We're unlikely to build large sglists
         * out of smaller entries.
         */
        fmr_page_shift         = max(12, ffs(dev_attr->page_size_cap) - 1);
        srp_dev->fmr_page_size = 1 << fmr_page_shift;
        srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1);
        srp_dev->fmr_max_size  = srp_dev->fmr_page_size * SRP_FMR_SIZE;

        INIT_LIST_HEAD(&srp_dev->dev_list);

        srp_dev->dev = device;
        srp_dev->pd  = ib_alloc_pd(device);
        if (IS_ERR(srp_dev->pd))
                goto free_dev;

        srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
                                    IB_ACCESS_LOCAL_WRITE |
                                    IB_ACCESS_REMOTE_READ |
                                    IB_ACCESS_REMOTE_WRITE);
        if (IS_ERR(srp_dev->mr))
                goto err_pd;

        for (max_pages_per_fmr = SRP_FMR_SIZE;
             max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
             max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
                memset(&fmr_param, 0, sizeof fmr_param);
                fmr_param.pool_size         = SRP_FMR_POOL_SIZE;
                fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
                fmr_param.cache             = 1;
                fmr_param.max_pages_per_fmr = max_pages_per_fmr;
                fmr_param.page_shift        = fmr_page_shift;
                fmr_param.access            = (IB_ACCESS_LOCAL_WRITE |
                                               IB_ACCESS_REMOTE_WRITE |
                                               IB_ACCESS_REMOTE_READ);

                srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
                if (!IS_ERR(srp_dev->fmr_pool))
                        break;
        }

        if (IS_ERR(srp_dev->fmr_pool))
                srp_dev->fmr_pool = NULL;

        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                s = 0;
                e = 0;
        } else {
                s = 1;
                e = device->phys_port_cnt;
        }

        for (p = s; p <= e; ++p) {
                host = srp_add_port(srp_dev, p);
                if (host)
                        list_add_tail(&host->list, &srp_dev->dev_list);
        }

        ib_set_client_data(device, &srp_client, srp_dev);

        goto free_attr;

err_pd:
        ib_dealloc_pd(srp_dev->pd);

free_dev:
        kfree(srp_dev);

free_attr:
        kfree(dev_attr);
}

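/*
 * IB client remove callback: tear down all ports and target ports created
 * for this device and release the PD, MR and FMR pool.
 */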
static void srp_remove_one(struct ib_device *device)
{
        struct srp_device *srp_dev;
        struct srp_host *host, *tmp_host;
        struct srp_target_port *target;

        srp_dev = ib_get_client_data(device, &srp_client);

        list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
                device_unregister(&host->dev);
                /*
                 * Wait for the sysfs entry to go away, so that no new
                 * target ports can be created.
                 */
                wait_for_completion(&host->released);

                /*
                 * Remove all target ports.
                 */
                spin_lock(&host->target_lock);
                list_for_each_entry(target, &host->target_list, list)
                        srp_queue_remove_work(target);
                spin_unlock(&host->target_lock);

                /*
                 * Wait for target port removal tasks.
                 */
                flush_workqueue(system_long_wq);

                kfree(host);
        }

        if (srp_dev->fmr_pool)
                ib_destroy_fmr_pool(srp_dev->fmr_pool);
        ib_dereg_mr(srp_dev->mr);
        ib_dealloc_pd(srp_dev->pd);

        kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
};

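/*
 * Module initialization: validate and reconcile the scatter/gather module
 * parameters, then register the SRP transport class, the srp sysfs class,
 * the SA client and the IB client.
 */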
static int __init srp_init_module(void)
{
        int ret;

        BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

        if (srp_sg_tablesize) {
                pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
                if (!cmd_sg_entries)
                        cmd_sg_entries = srp_sg_tablesize;
        }

        if (!cmd_sg_entries)
                cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

        if (cmd_sg_entries > 255) {
                pr_warn("Clamping cmd_sg_entries to 255\n");
                cmd_sg_entries = 255;
        }

        if (!indirect_sg_entries)
                indirect_sg_entries = cmd_sg_entries;
        else if (indirect_sg_entries < cmd_sg_entries) {
                pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
                        cmd_sg_entries);
                indirect_sg_entries = cmd_sg_entries;
        }

        ib_srp_transport_template =
                srp_attach_transport(&ib_srp_transport_functions);
        if (!ib_srp_transport_template)
                return -ENOMEM;

        ret = class_register(&srp_class);
        if (ret) {
                pr_err("couldn't register class infiniband_srp\n");
                srp_release_transport(ib_srp_transport_template);
                return ret;
        }

        ib_sa_register_client(&srp_sa_client);

        ret = ib_register_client(&srp_client);
        if (ret) {
                pr_err("couldn't register IB client\n");
                srp_release_transport(ib_srp_transport_template);
                ib_sa_unregister_client(&srp_sa_client);
                class_unregister(&srp_class);
                return ret;
        }

        return 0;
}

static void __exit srp_cleanup_module(void)
{
        ib_unregister_client(&srp_client);
        ib_sa_unregister_client(&srp_sa_client);
        class_unregister(&srp_class);
        srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);