csio_lnode.c
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <asm/unaligned.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_gs.h>
#include <scsi/fc/fc_ms.h>

#include "csio_hw.h"
#include "csio_mb.h"
#include "csio_lnode.h"
#include "csio_rnode.h"

int csio_fcoe_rnodes = 1024;
int csio_fdmi_enable = 1;

#define PORT_ID_PTR(_x)		((uint8_t *)(&_x) + 1)
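/*
 * Note: a 24-bit FC port ID stored via htonl() occupies the three low-order
 * bytes of the resulting big-endian word, so skipping the first byte yields
 * a pointer to the ID itself; csio_ln_prep_ecwr() below relies on this to
 * copy the 3-byte source/destination IDs into the WR.
 */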
/* Lnode SM declarations */
static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev);

static int csio_ln_mgmt_submit_req(struct csio_ioreq *,
		void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
		enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t);

/* LN event mapping */
static enum csio_ln_ev fwevt_to_lnevt[] = {
	CSIO_LNE_NONE,		/* None */
	CSIO_LNE_NONE,		/* PLOGI_ACC_RCVD  */
	CSIO_LNE_NONE,		/* PLOGI_RJT_RCVD  */
	CSIO_LNE_NONE,		/* PLOGI_RCVD	   */
	CSIO_LNE_NONE,		/* PLOGO_RCVD	   */
	CSIO_LNE_NONE,		/* PRLI_ACC_RCVD   */
	CSIO_LNE_NONE,		/* PRLI_RJT_RCVD   */
	CSIO_LNE_NONE,		/* PRLI_RCVD	   */
	CSIO_LNE_NONE,		/* PRLO_RCVD	   */
	CSIO_LNE_NONE,		/* NPORT_ID_CHGD   */
	CSIO_LNE_LOGO,		/* FLOGO_RCVD	   */
	CSIO_LNE_LOGO,		/* CLR_VIRT_LNK_RCVD */
	CSIO_LNE_FAB_INIT_DONE,	/* FLOGI_ACC_RCVD   */
	CSIO_LNE_NONE,		/* FLOGI_RJT_RCVD   */
	CSIO_LNE_FAB_INIT_DONE,	/* FDISC_ACC_RCVD   */
	CSIO_LNE_NONE,		/* FDISC_RJT_RCVD   */
	CSIO_LNE_NONE,		/* FLOGI_TMO_MAX_RETRY */
	CSIO_LNE_NONE,		/* IMPL_LOGO_ADISC_ACC */
	CSIO_LNE_NONE,		/* IMPL_LOGO_ADISC_RJT */
	CSIO_LNE_NONE,		/* IMPL_LOGO_ADISC_CNFLT */
	CSIO_LNE_NONE,		/* PRLI_TMO		*/
	CSIO_LNE_NONE,		/* ADISC_TMO		*/
	CSIO_LNE_NONE,		/* RSCN_DEV_LOST  */
	CSIO_LNE_NONE,		/* SCR_ACC_RCVD	*/
	CSIO_LNE_NONE,		/* ADISC_RJT_RCVD */
	CSIO_LNE_NONE,		/* LOGO_SNT */
	CSIO_LNE_NONE,		/* PROTO_ERR_IMPL_LOGO */
};

#define CSIO_FWE_TO_LNE(_evt)	((_evt > PROTO_ERR_IMPL_LOGO) ?		\
						CSIO_LNE_NONE :		\
						fwevt_to_lnevt[_evt])
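/*
 * For example, CSIO_FWE_TO_LNE(FLOGI_ACC_RCVD) yields CSIO_LNE_FAB_INIT_DONE,
 * while most events (e.g. PLOGI_ACC_RCVD) map to CSIO_LNE_NONE and cause no
 * lnode SM transition; events beyond PROTO_ERR_IMPL_LOGO fall outside the
 * table and are clamped to CSIO_LNE_NONE.
 */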
#define csio_ct_rsp(cp)		(((struct fc_ct_hdr *)cp)->ct_cmd)
#define csio_ct_reason(cp)	(((struct fc_ct_hdr *)cp)->ct_reason)
#define csio_ct_expl(cp)	(((struct fc_ct_hdr *)cp)->ct_explan)
#define csio_ct_get_pld(cp)	((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN))

/*
 * csio_ln_lookup_by_portid - Lookup lnode using the given portid.
 * @hw: HW module
 * @portid: port-id.
 *
 * If found, returns the lnode matching the given portid; otherwise returns
 * NULL.
 */
static struct csio_lnode *
csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
{
	struct csio_lnode *ln = hw->rln;
	struct list_head *tmp;

	/* Match siblings lnode with portid */
	list_for_each(tmp, &hw->sln_head) {
		ln = (struct csio_lnode *) tmp;
		if (ln->portid == portid)
			return ln;
	}

	return NULL;
}
/*
 * csio_ln_lookup_by_vnpi - Lookup lnode using the given vnp id.
 * @hw: HW module
 * @vnp_id: vnp index.
 *
 * If found, returns the lnode (sibling or child) matching the given vnp id;
 * otherwise returns NULL.
 */
static struct csio_lnode *
csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id)
{
	struct list_head *tmp1, *tmp2;
	struct csio_lnode *sln = NULL, *cln = NULL;

	if (list_empty(&hw->sln_head)) {
		CSIO_INC_STATS(hw, n_lnlkup_miss);
		return NULL;
	}

	/* Traverse sibling lnodes */
	list_for_each(tmp1, &hw->sln_head) {
		sln = (struct csio_lnode *) tmp1;

		/* Match sibling lnode */
		if (sln->vnp_flowid == vnp_id)
			return sln;

		if (list_empty(&sln->cln_head))
			continue;

		/* Traverse children lnodes */
		list_for_each(tmp2, &sln->cln_head) {
			cln = (struct csio_lnode *) tmp2;

			if (cln->vnp_flowid == vnp_id)
				return cln;
		}
	}
	CSIO_INC_STATS(hw, n_lnlkup_miss);
	return NULL;
}

/**
 * csio_lnode_lookup_by_wwpn - Lookup lnode using the given wwpn.
 * @hw: HW module.
 * @wwpn: WWPN.
 *
 * If found, returns the lnode matching the given wwpn; otherwise returns
 * NULL.
 */
struct csio_lnode *
csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn)
{
	struct list_head *tmp1, *tmp2;
	struct csio_lnode *sln = NULL, *cln = NULL;

	if (list_empty(&hw->sln_head)) {
		CSIO_INC_STATS(hw, n_lnlkup_miss);
		return NULL;
	}

	/* Traverse sibling lnodes */
	list_for_each(tmp1, &hw->sln_head) {
		sln = (struct csio_lnode *) tmp1;

		/* Match sibling lnode */
		if (!memcmp(csio_ln_wwpn(sln), wwpn, 8))
			return sln;

		if (list_empty(&sln->cln_head))
			continue;

		/* Traverse children lnodes */
		list_for_each(tmp2, &sln->cln_head) {
			cln = (struct csio_lnode *) tmp2;

			if (!memcmp(csio_ln_wwpn(cln), wwpn, 8))
				return cln;
		}
	}

	return NULL;
}
/* FDMI */
static void
csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op)
{
	struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf;
	cmd->ct_rev = FC_CT_REV;
	cmd->ct_fs_type = type;
	cmd->ct_fs_subtype = sub_type;
	cmd->ct_cmd = op;
}

static int
csio_hostname(uint8_t *buf, size_t buf_len)
{
	if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0)
		return 0;
	return -1;
}

static int
csio_osname(uint8_t *buf, size_t buf_len)
{
	if (snprintf(buf, buf_len, "%s %s %s",
		     init_utsname()->sysname,
		     init_utsname()->release,
		     init_utsname()->version) > 0)
		return 0;

	return -1;
}

static inline void
csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len)
{
	struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;
	uint16_t attr_len;

	ae->type = htons(type);
	attr_len = len + 4;		/* includes attribute type and length */
	attr_len = (attr_len + 3) & ~3;	/* should be a multiple of 4 bytes */
	ae->len = htons(attr_len);
	memset(ae->value, 0, attr_len - 4);
	memcpy(ae->value, val, len);	/* copy only the caller's len bytes,
					 * not the padded length, to avoid
					 * reading past the end of val */
	*ptr += attr_len;
}
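/*
 * Illustrative sketch of the entry layout csio_append_attrib() emits
 * (struct fc_fdmi_attr_entry, paraphrased from scsi/fc/fc_ms.h):
 *
 *	__be16 type;	// attribute type
 *	__be16 len;	// total entry length: 4-byte header + padded value
 *	__u8   value[];	// value, zero-padded to a 4-byte multiple
 *
 * For example, a 10-byte value is written with len = htons(16): a 4-byte
 * header followed by 10 data bytes and 2 bytes of zero padding.
 */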
/*
 * csio_ln_fdmi_done - FDMI registration completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
	void *cmd;
	struct csio_lnode *ln = fdmi_req->lnode;

	if (fdmi_req->wr_status != FW_SUCCESS) {
		csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n",
			    fdmi_req->wr_status);
		CSIO_INC_STATS(ln, n_fdmi_err);
	}

	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}
}

/*
 * csio_ln_fdmi_rhba_cbfn - RHBA completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
	void *cmd;
	uint8_t *pld;
	uint32_t len = 0;
	struct csio_lnode *ln = fdmi_req->lnode;
	struct fs_fdmi_attrs *attrib_blk;
	struct fc_fdmi_port_name *port_name;
	uint8_t buf[64];
	uint32_t val;
	uint8_t *fc4_type;

	if (fdmi_req->wr_status != FW_SUCCESS) {
		csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
			    fdmi_req->wr_status);
		CSIO_INC_STATS(ln, n_fdmi_err);
	}

	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}

	if (!csio_is_rnode_ready(fdmi_req->rnode)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		return;
	}

	/* Prepare CT hdr for RPA cmd */
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, htons(FC_FDMI_RPA));

	/* Prepare RPA payload */
	pld = (uint8_t *)csio_ct_get_pld(cmd);
	port_name = (struct fc_fdmi_port_name *)pld;
	memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
	pld += sizeof(*port_name);

	/* Start appending Port attributes */
	attrib_blk = (struct fs_fdmi_attrs *)pld;
	attrib_blk->numattrs = 0;
	len += sizeof(attrib_blk->numattrs);
	pld += sizeof(attrib_blk->numattrs);

	fc4_type = &buf[0];
	memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
	fc4_type[2] = 1;
	fc4_type[7] = 1;
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES,
			   fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
	attrib_blk->numattrs++;

	val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
			   (uint8_t *)&val,
			   FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
	attrib_blk->numattrs++;

	if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G)
		val = htonl(FC_PORTSPEED_1GBIT);
	else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G)
		val = htonl(FC_PORTSPEED_10GBIT);
	else
		val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
			   (uint8_t *)&val,
			   FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
	attrib_blk->numattrs++;

	val = htonl(ln->ln_sparm.csp.sp_bb_data);
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
			   (uint8_t *)&val, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
	attrib_blk->numattrs++;

	strcpy(buf, "csiostor");
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,
			   (uint16_t)strlen(buf));
	attrib_blk->numattrs++;

	if (!csio_hostname(buf, sizeof(buf))) {
		csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,
				   buf, (uint16_t)strlen(buf));
		attrib_blk->numattrs++;
	}
	attrib_blk->numattrs = htonl(attrib_blk->numattrs);
	len = (uint32_t)(pld - (uint8_t *)cmd);

	/* Submit FDMI RPA request */
	spin_lock_irq(&hw->lock);
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
				    FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
	}
	spin_unlock_irq(&hw->lock);
}
/*
 * csio_ln_fdmi_dprt_cbfn - DPRT completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
	void *cmd;
	uint8_t *pld;
	uint32_t len = 0;
	uint32_t maxpayload = htonl(65536);
	struct fc_fdmi_hba_identifier *hbaid;
	struct csio_lnode *ln = fdmi_req->lnode;
	struct fc_fdmi_rpl *reg_pl;
	struct fs_fdmi_attrs *attrib_blk;
	uint8_t buf[64];

	if (fdmi_req->wr_status != FW_SUCCESS) {
		csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
			    fdmi_req->wr_status);
		CSIO_INC_STATS(ln, n_fdmi_err);
	}

	if (!csio_is_rnode_ready(fdmi_req->rnode)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		return;
	}
	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}

	/* Prepare CT hdr for RHBA cmd */
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, htons(FC_FDMI_RHBA));
	len = FC_CT_HDR_LEN;

	/* Prepare RHBA payload */
	pld = (uint8_t *)csio_ct_get_pld(cmd);
	hbaid = (struct fc_fdmi_hba_identifier *)pld;
	memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifier */
	pld += sizeof(*hbaid);

	/* Register one port per hba */
	reg_pl = (struct fc_fdmi_rpl *)pld;
	reg_pl->numport = htonl(1);
	memcpy(&reg_pl->port[0].portname, csio_ln_wwpn(ln), 8);
	pld += sizeof(*reg_pl);

	/* Start appending HBA attributes */
	attrib_blk = (struct fs_fdmi_attrs *)pld;
	attrib_blk->numattrs = 0;
	len += sizeof(attrib_blk->numattrs);
	pld += sizeof(attrib_blk->numattrs);

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln),
			   FC_FDMI_HBA_ATTR_NODENAME_LEN);
	attrib_blk->numattrs++;

	memset(buf, 0, sizeof(buf));
	strcpy(buf, "Chelsio Communications");
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
			   (uint16_t)strlen(buf));
	attrib_blk->numattrs++;

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
			   hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn));
	attrib_blk->numattrs++;

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
			   (uint16_t)sizeof(hw->vpd.id));
	attrib_blk->numattrs++;

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
			   hw->model_desc, (uint16_t)strlen(hw->model_desc));
	attrib_blk->numattrs++;

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
			   hw->hw_ver, (uint16_t)sizeof(hw->hw_ver));
	attrib_blk->numattrs++;

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
			   hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str));
	attrib_blk->numattrs++;

	if (!csio_osname(buf, sizeof(buf))) {
		csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
				   buf, (uint16_t)strlen(buf));
		attrib_blk->numattrs++;
	}

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
			   (uint8_t *)&maxpayload,
			   FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
	len = (uint32_t)(pld - (uint8_t *)cmd);
	attrib_blk->numattrs++;
	attrib_blk->numattrs = htonl(attrib_blk->numattrs);

	/* Submit FDMI RHBA request */
	spin_lock_irq(&hw->lock);
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
				    FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
	}
	spin_unlock_irq(&hw->lock);
}
/*
 * csio_ln_fdmi_dhba_cbfn - DHBA completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
	struct csio_lnode *ln = fdmi_req->lnode;
	void *cmd;
	struct fc_fdmi_port_name *port_name;
	uint32_t len;

	if (fdmi_req->wr_status != FW_SUCCESS) {
		csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
			    fdmi_req->wr_status);
		CSIO_INC_STATS(ln, n_fdmi_err);
	}

	if (!csio_is_rnode_ready(fdmi_req->rnode)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		return;
	}
	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}

	/*
	 * Send FDMI cmd to de-register any Port attributes if registered
	 * before.
	 */

	/* Prepare FDMI DPRT cmd */
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, htons(FC_FDMI_DPRT));
	len = FC_CT_HDR_LEN;
	port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd);
	memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
	len += sizeof(*port_name);

	/* Submit FDMI request */
	spin_lock_irq(&hw->lock);
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
				    FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
	}
	spin_unlock_irq(&hw->lock);
}
/**
 * csio_ln_fdmi_start - Start an FDMI request.
 * @ln: lnode
 * @context: session context
 *
 * Issued with lock held.
 */
int
csio_ln_fdmi_start(struct csio_lnode *ln, void *context)
{
	struct csio_ioreq *fdmi_req;
	struct csio_rnode *fdmi_rn = (struct csio_rnode *)context;
	void *cmd;
	struct fc_fdmi_hba_identifier *hbaid;
	uint32_t len;

	if (!(ln->flags & CSIO_LNF_FDMI_ENABLE))
		return -EPROTONOSUPPORT;

	if (!csio_is_rnode_ready(fdmi_rn))
		CSIO_INC_STATS(ln, n_fdmi_err);

	/*
	 * Send FDMI cmd to de-register any HBA attributes if registered
	 * before.
	 */
	fdmi_req = ln->mgmt_req;
	fdmi_req->lnode = ln;
	fdmi_req->rnode = fdmi_rn;

	/* Prepare FDMI DHBA cmd */
	cmd = fdmi_req->dma_buf.vaddr;
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, htons(FC_FDMI_DHBA));
	len = FC_CT_HDR_LEN;

	hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd);
	memcpy(&hbaid->id, csio_ln_wwpn(ln), 8);
	len += sizeof(*hbaid);

	/* Submit FDMI request */
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn,
				    FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n");
	}

	return 0;
}
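/*
 * FDMI registration thus proceeds as a chain of CT requests, each issued
 * from the completion callback of the previous one:
 *
 *	csio_ln_fdmi_start()     -> DHBA (de-register HBA)
 *	csio_ln_fdmi_dhba_cbfn() -> DPRT (de-register port)
 *	csio_ln_fdmi_dprt_cbfn() -> RHBA (register HBA attributes)
 *	csio_ln_fdmi_rhba_cbfn() -> RPA  (register port attributes)
 *	csio_ln_fdmi_done()      -> chain complete
 */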
/*
 * csio_ln_vnp_read_cbfn - vnp read completion handler.
 * @hw: HW module.
 * @mbp: Mailbox command/response.
 *
 * Reads the vnp response and updates ln parameters.
 */
static void
csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv);
	struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
	struct fc_els_csp *csp;
	struct fc_els_cssp *clsp;
	enum fw_retval retval;

	retval = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return;
	}

	spin_lock_irq(&hw->lock);

	memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac));
	memcpy(&ln->nport_id, &rsp->vnport_mac[3], sizeof(uint8_t)*3);
	ln->nport_id = ntohl(ln->nport_id);
	ln->nport_id = ln->nport_id >> 8;

	/* Update WWNs */
	/*
	 * This may look like a duplication of what csio_fcoe_enable_link()
	 * does, but is absolutely necessary if the vnpi changes between
	 * an FCOE LINK UP and FCOE LINK DOWN.
	 */
	memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
	memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);

	/* Copy common sparam */
	csp = (struct fc_els_csp *)rsp->cmn_srv_parms;
	ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver;
	ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver;
	ln->ln_sparm.csp.sp_bb_cred = ntohs(csp->sp_bb_cred);
	ln->ln_sparm.csp.sp_features = ntohs(csp->sp_features);
	ln->ln_sparm.csp.sp_bb_data = ntohs(csp->sp_bb_data);
	ln->ln_sparm.csp.sp_r_a_tov = ntohl(csp->sp_r_a_tov);
	ln->ln_sparm.csp.sp_e_d_tov = ntohl(csp->sp_e_d_tov);

	/* Copy word 0 & word 1 of class sparam */
	clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1;
	ln->ln_sparm.clsp[2].cp_class = ntohs(clsp->cp_class);
	ln->ln_sparm.clsp[2].cp_init = ntohs(clsp->cp_init);
	ln->ln_sparm.clsp[2].cp_recip = ntohs(clsp->cp_recip);
	ln->ln_sparm.clsp[2].cp_rdfs = ntohs(clsp->cp_rdfs);

	spin_unlock_irq(&hw->lock);

	mempool_free(mbp, hw->mb_mempool);

	/* Send an event to update local attribs */
	csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE);
}
/*
 * csio_ln_vnp_read - Read vnp params.
 * @ln: lnode
 * @cbfn: Completion handler.
 *
 * Issued with lock held.
 */
static int
csio_ln_vnp_read(struct csio_lnode *ln,
		 void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_hw *hw = ln->hwp;
	struct csio_mb *mbp;

	/* Allocate Mbox request */
	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Prepare VNP Command */
	csio_fcoe_vnp_read_init_mb(ln, mbp,
				   CSIO_MB_DEFAULT_TMO,
				   ln->fcf_flowid,
				   ln->vnp_flowid,
				   cbfn);

	/* Issue MBOX cmd */
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Failed to issue mbox FCoE VNP command\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	return 0;
}
/*
 * csio_fcoe_enable_link - Enable fcoe link.
 * @ln: lnode
 * @enable: enable/disable
 * Issued with lock held.
 * Issues mbox cmd to bring up FCOE link on port associated with given ln.
 */
static int
csio_fcoe_enable_link(struct csio_lnode *ln, bool enable)
{
	struct csio_hw *hw = ln->hwp;
	struct csio_mb *mbp;
	enum fw_retval retval;
	uint8_t portid;
	uint8_t sub_op;
	struct fw_fcoe_link_cmd *lcmd;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	portid = ln->portid;
	sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN;

	csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n",
		 sub_op ? "UP" : "DOWN", portid);

	csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
					  portid, sub_op, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n",
			 portid);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw,
			 "FCOE LINK %s cmd on port[%d] failed with "
			 "ret:x%x\n", sub_op ? "UP" : "DOWN", portid, retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	if (!enable)
		goto out;

	lcmd = (struct fw_fcoe_link_cmd *)mbp->mb;

	memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8);
	memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8);

	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		if (hw->pport[i].portid == portid)
			memcpy(hw->pport[i].mac, lcmd->phy_mac, 6);

out:
	mempool_free(mbp, hw->mb_mempool);
	return 0;
}
/*
 * csio_ln_read_fcf_cbfn - Read fcf parameters
 * @hw: HW module.
 * @mbp: Mailbox command/response.
 *
 * Reads the fcf response and updates the ln fcf information.
 */
static void
csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct csio_lnode *ln = (struct csio_lnode *)mbp->priv;
	struct csio_fcf_info *fcf_info;
	struct fw_fcoe_fcf_cmd *rsp =
				(struct fw_fcoe_fcf_cmd *)(mbp->mb);
	enum fw_retval retval;

	retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n",
			    retval);
		mempool_free(mbp, hw->mb_mempool);
		return;
	}

	spin_lock_irq(&hw->lock);
	fcf_info = ln->fcfinfo;
	fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET(
					ntohs(rsp->priority_pkd));
	fcf_info->vf_id = ntohs(rsp->vf_id);
	fcf_info->vlan_id = rsp->vlan_id;
	fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size);
	fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv);
	fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi));
	fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid);
	fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid);
	fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid);
	fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid);
	memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map));
	memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac));
	memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id));
	memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric));
	memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac));
	spin_unlock_irq(&hw->lock);

	mempool_free(mbp, hw->mb_mempool);
}
/*
 * csio_ln_read_fcf_entry - Read fcf entry.
 * @ln: lnode
 * @cbfn: Completion handler.
 *
 * Issued with lock held.
 */
static int
csio_ln_read_fcf_entry(struct csio_lnode *ln,
		       void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_hw *hw = ln->hwp;
	struct csio_mb *mbp;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FCF information */
	csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
				   ln->portid, ln->fcf_flowid, cbfn);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FCOE FCF cmd\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	return 0;
}
/*
 * csio_handle_link_up - Logical Linkup event.
 * @hw - HW module.
 * @portid - Physical port number
 * @fcfi - FCF index.
 * @vnpi - VNP index.
 * Returns - none.
 *
 * This event is received from FW when a virtual link is established between
 * the Physical port[ENode] and FCF. If it is a new vnpi, a local node object
 * is created on this FCF and set to [ONLINE] state.
 * The lnode then waits for an FW_RDEV_CMD event indicating that the
 * Fabric login is completed, and moves to [READY] state.
 *
 * This is called with the hw lock held.
 */
static void
csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
		    uint32_t vnpi)
{
	struct csio_lnode *ln = NULL;

	/* Lookup lnode based on vnpi */
	ln = csio_ln_lookup_by_vnpi(hw, vnpi);
	if (!ln) {
		/* Pick lnode based on portid */
		ln = csio_ln_lookup_by_portid(hw, portid);
		if (!ln) {
			csio_err(hw, "failed to lookup fcoe lnode on port:%d\n",
				 portid);
			CSIO_DB_ASSERT(0);
			return;
		}

		/* Check if lnode has valid vnp flowid */
		if (ln->vnp_flowid != CSIO_INVALID_IDX) {
			/* New VN-Port */
			spin_unlock_irq(&hw->lock);
			ln = csio_lnode_alloc(hw);
			spin_lock_irq(&hw->lock);
			if (!ln) {
				csio_err(hw,
					 "failed to allocate fcoe lnode "
					 "for port:%d vnpi:x%x\n",
					 portid, vnpi);
				CSIO_DB_ASSERT(0);
				return;
			}
			ln->portid = portid;
		}
		ln->vnp_flowid = vnpi;
		ln->dev_num &= ~0xFFFF;
		ln->dev_num |= vnpi;
	}

	/* Initialize fcfi */
	ln->fcf_flowid = fcfi;

	csio_info(hw, "Port:%d - FCOE LINK UP\n", portid);

	CSIO_INC_STATS(ln, n_link_up);

	/* Send LINKUP event to SM */
	csio_post_event(&ln->sm, CSIO_LNE_LINKUP);
}
/*
 * csio_post_event_rns - Post the given rnode event to all rnodes.
 * @ln - FCOE lnode
 * @evt - Given rnode event
 * Returns - none
 *
 * Posts the given rnode event to all FCOE rnodes connected with the given
 * lnode. This routine is invoked when the lnode receives a
 * LINK_DOWN/DOWN_LINK/CLOSE event.
 *
 * This is called with the hw lock held.
 */
static void
csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp, *next;
	struct csio_rnode *rn;

	list_for_each_safe(tmp, next, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		csio_post_event(&rn->sm, evt);
	}
}

/*
 * csio_cleanup_rns - Free all rnodes of an lnode.
 * @ln - FCOE lnode
 * Returns - none
 *
 * Frees all FCOE rnodes connected with the given lnode.
 *
 * This is called with the hw lock held.
 */
static void
csio_cleanup_rns(struct csio_lnode *ln)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp, *next_rn;
	struct csio_rnode *rn;

	list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		csio_put_rnode(ln, rn);
	}
}

/*
 * csio_post_event_lns - Post the given lnode event to an lnode tree.
 * @ln - FCOE lnode
 * @evt - Given lnode event
 * Returns - none
 *
 * Posts the given lnode event to all FCOE lnodes connected with the given
 * lnode. This routine is invoked when the lnode receives a
 * LINK_DOWN/DOWN_LINK/CLOSE event.
 *
 * This is called with the hw lock held.
 */
static void
csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct list_head *tmp;
	struct csio_lnode *cln, *sln;

	/* If NPIV lnode, send evt only to that and return */
	if (csio_is_npiv_ln(ln)) {
		csio_post_event(&ln->sm, evt);
		return;
	}

	sln = ln;
	/* Traverse children lnodes list and send evt */
	list_for_each(tmp, &sln->cln_head) {
		cln = (struct csio_lnode *) tmp;
		csio_post_event(&cln->sm, evt);
	}

	/* Send evt to parent lnode */
	csio_post_event(&ln->sm, evt);
}

/*
 * csio_ln_down - Local nport is down.
 * @ln - FCOE Lnode
 * Returns - none
 *
 * Sends LINK_DOWN events to the lnode and its associated NPIV lnodes.
 *
 * This is called with the hw lock held.
 */
static void
csio_ln_down(struct csio_lnode *ln)
{
	csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN);
}
/*
 * csio_handle_link_down - Logical Linkdown event.
 * @hw - HW module.
 * @portid - Physical port number
 * @fcfi - FCF index.
 * @vnpi - VNP index.
 * Returns - none
 *
 * This event is received from FW when the virtual link goes down between
 * the Physical port[ENode] and FCF. The lnode and its associated NPIV lnodes
 * hosted on this vnpi[VN-Port] will be de-instantiated.
 *
 * This is called with the hw lock held.
 */
static void
csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
		      uint32_t vnpi)
{
	struct csio_fcf_info *fp;
	struct csio_lnode *ln;

	/* Lookup lnode based on vnpi */
	ln = csio_ln_lookup_by_vnpi(hw, vnpi);
	if (ln) {
		fp = ln->fcfinfo;
		CSIO_INC_STATS(ln, n_link_down);

		/* Warn if linkdown is received while lnode is not ready */
		if (!csio_is_lnode_ready(ln)) {
			csio_ln_warn(ln,
				     "warn: FCOE link is already offline. "
				     "Ignoring FCoE linkdown event on portid %d\n",
				     portid);
			CSIO_INC_STATS(ln, n_evt_drop);
			return;
		}

		/* Verify portid */
		if (fp->portid != portid) {
			csio_ln_warn(ln,
				     "warn: FCOE linkdown recv with "
				     "invalid port %d\n", portid);
			CSIO_INC_STATS(ln, n_evt_drop);
			return;
		}

		/* Verify fcfi */
		if (ln->fcf_flowid != fcfi) {
			csio_ln_warn(ln,
				     "warn: FCOE linkdown recv with "
				     "invalid fcfi x%x\n", fcfi);
			CSIO_INC_STATS(ln, n_evt_drop);
			return;
		}

		csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid);

		/* Send LINK_DOWN event to lnode s/m */
		csio_ln_down(ln);

		return;
	} else {
		csio_warn(hw,
			  "warn: FCOE linkdown recv with invalid vnpi x%x\n",
			  vnpi);
		CSIO_INC_STATS(hw, n_evt_drop);
	}
}

/*
 * csio_is_lnode_ready - Checks if an FCOE lnode is in ready state.
 * @ln: Lnode module
 *
 * Returns True if the FCOE lnode is in ready state.
 */
int
csio_is_lnode_ready(struct csio_lnode *ln)
{
	return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
}
/*****************************************************************************/
/* START: Lnode SM */
/*****************************************************************************/
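/*
 * Summary of the lnode SM transitions implemented below:
 *
 *	uninit  --LINKUP-------------------> online
 *	online  --FAB_INIT_DONE------------> ready
 *	online  --LINK_DOWN/DOWN_LINK------> uninit
 *	ready   --LINK_DOWN/DOWN_LINK/LOGO-> offline
 *	ready/offline --CLOSE--------------> uninit
 *	offline --LINKUP-------------------> online
 */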
/*
 * csio_lns_uninit - The request in uninit state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Process the given lnode event which is currently in "uninit" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_lnode *rln = hw->rln;
	int rv;

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_LINKUP:
		csio_set_state(&ln->sm, csio_lns_online);
		/* Read FCF only for physical lnode */
		if (csio_is_phys_ln(ln)) {
			rv = csio_ln_read_fcf_entry(ln,
					csio_ln_read_fcf_cbfn);
			if (rv != 0) {
				/* TODO: Send HW RESET event */
				CSIO_INC_STATS(ln, n_err);
				break;
			}

			/* Add FCF record */
			list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
		}

		rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
		if (rv != 0) {
			/* TODO: Send HW RESET event */
			CSIO_INC_STATS(ln, n_err);
		}
		break;

	case CSIO_LNE_DOWN_LINK:
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[uninit].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		break;
	} /* switch event */
}
/*
 * csio_lns_online - The request in online state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Process the given lnode event which is currently in "online" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_LINKUP:
		csio_ln_warn(ln,
			     "warn: FCOE link is up already. "
			     "Ignoring linkup on port:%d\n", ln->portid);
		CSIO_INC_STATS(ln, n_evt_drop);
		break;

	case CSIO_LNE_FAB_INIT_DONE:
		csio_set_state(&ln->sm, csio_lns_ready);

		spin_unlock_irq(&hw->lock);
		csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP);
		spin_lock_irq(&hw->lock);

		break;

	case CSIO_LNE_LINK_DOWN:
		/* Fall through */
	case CSIO_LNE_DOWN_LINK:
		csio_set_state(&ln->sm, csio_lns_uninit);
		if (csio_is_phys_ln(ln)) {
			/* Remove FCF entry */
			list_del_init(&ln->fcfinfo->list);
		}
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[online].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		break;
	} /* switch event */
}
/*
 * csio_lns_ready - The request in ready state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Process the given lnode event which is currently in "ready" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_FAB_INIT_DONE:
		csio_ln_dbg(ln,
			    "ignoring event %d recv from did x%x "
			    "in ln state[ready].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_drop);
		break;

	case CSIO_LNE_LINK_DOWN:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);

		spin_unlock_irq(&hw->lock);
		csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
		spin_lock_irq(&hw->lock);

		if (csio_is_phys_ln(ln)) {
			/* Remove FCF entry */
			list_del_init(&ln->fcfinfo->list);
		}
		break;

	case CSIO_LNE_DOWN_LINK:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);

		/*
		 * The host needs to issue aborts in case FW has not
		 * returned WRs with status "ABORTED".
		 */
		spin_unlock_irq(&hw->lock);
		csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
		spin_lock_irq(&hw->lock);

		if (csio_is_phys_ln(ln)) {
			/* Remove FCF entry */
			list_del_init(&ln->fcfinfo->list);
		}
		break;

	case CSIO_LNE_CLOSE:
		csio_set_state(&ln->sm, csio_lns_uninit);
		csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
		break;

	case CSIO_LNE_LOGO:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[ready].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		CSIO_DB_ASSERT(0);
		break;
	} /* switch event */
}
/*
 * csio_lns_offline - The request in offline state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Process the given lnode event which is currently in "offline" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_lnode *rln = hw->rln;
	int rv;

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_LINKUP:
		csio_set_state(&ln->sm, csio_lns_online);
		/* Read FCF only for physical lnode */
		if (csio_is_phys_ln(ln)) {
			rv = csio_ln_read_fcf_entry(ln,
					csio_ln_read_fcf_cbfn);
			if (rv != 0) {
				/* TODO: Send HW RESET event */
				CSIO_INC_STATS(ln, n_err);
				break;
			}

			/* Add FCF record */
			list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
		}

		rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
		if (rv != 0) {
			/* TODO: Send HW RESET event */
			CSIO_INC_STATS(ln, n_err);
		}
		break;

	case CSIO_LNE_LINK_DOWN:
	case CSIO_LNE_DOWN_LINK:
	case CSIO_LNE_LOGO:
		csio_ln_dbg(ln,
			    "ignoring event %d recv from did x%x "
			    "in ln state[offline].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_drop);
		break;

	case CSIO_LNE_CLOSE:
		csio_set_state(&ln->sm, csio_lns_uninit);
		csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[offline]\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		CSIO_DB_ASSERT(0);
		break;
	} /* switch event */
}
/*****************************************************************************/
/* END: Lnode SM */
/*****************************************************************************/

static void
csio_free_fcfinfo(struct kref *kref)
{
	struct csio_fcf_info *fcfinfo = container_of(kref,
						struct csio_fcf_info, kref);
	kfree(fcfinfo);
}

/* Helper routines for attributes */

/*
 * csio_lnode_state_to_str - Get current state of FCOE lnode.
 * @ln - lnode
 * @str - state of lnode.
 *
 */
void
csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
{
	if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
		strcpy(str, "UNINIT");
		return;
	}
	if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
		strcpy(str, "READY");
		return;
	}
	if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
		strcpy(str, "OFFLINE");
		return;
	}
	strcpy(str, "UNKNOWN");
} /* csio_lnode_state_to_str */
int
csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid,
			struct fw_fcoe_port_stats *port_stats)
{
	struct csio_mb *mbp;
	struct fw_fcoe_port_cmd_params portparams;
	enum fw_retval retval;
	int idx;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "FCoE FCF PARAMS command out of memory!\n");
		return -EINVAL;
	}
	portparams.portid = portid;
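	/*
	 * The firmware returns the 16 per-port stats in three chunks:
	 * two reads of 6 stats each, then a final read of 4.
	 */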
	for (idx = 1; idx <= 3; idx++) {
		portparams.idx = (idx - 1) * 6 + 1;
		portparams.nstats = 6;
		if (idx == 3)
			portparams.nstats = 4;
		csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO,
						  &portparams, NULL);
		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of FCoE port params failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}
		csio_mb_process_portparams_rsp(hw, mbp, &retval,
					       &portparams, port_stats);
	}

	mempool_free(mbp, hw->mb_mempool);
	return 0;
}
/*
 * csio_ln_mgmt_wr_handler - Mgmt Work Request handler.
 * @hw - HW module.
 * @wr - WR.
 * @len - WR len.
 *
 * This handler is invoked when an outstanding mgmt WR is completed.
 * It is invoked in the context of the FW event worker thread for every
 * mgmt event received.
 * Return - none.
 */
static void
csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len)
{
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	struct csio_ioreq *io_req = NULL;
	struct fw_fcoe_els_ct_wr *wr_cmd;

	wr_cmd = (struct fw_fcoe_els_ct_wr *) wr;
	if (len < sizeof(struct fw_fcoe_els_ct_wr)) {
		csio_err(mgmtm->hw,
			 "Invalid ELS CT WR length recvd, len:%x\n", len);
		mgmtm->stats.n_err++;
		return;
	}

	io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie);
	io_req->wr_status = csio_wr_status(wr_cmd);

	/* Check that the ioreq exists in our active Q */
	spin_lock_irq(&hw->lock);
	if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) {
		csio_err(mgmtm->hw,
			 "Error- Invalid IO handle recv in WR. handle: %p\n",
			 io_req);
		mgmtm->stats.n_err++;
		spin_unlock_irq(&hw->lock);
		return;
	}

	/* Dequeue from active queue */
	list_del_init(&io_req->sm.sm_list);
	mgmtm->stats.n_active--;
	spin_unlock_irq(&hw->lock);

	/* io_req will be freed by completion handler */
	if (io_req->io_cbfn)
		io_req->io_cbfn(hw, io_req);
}
  1268. /**
  1269. * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events.
  1270. * @hw: HW module
  1271. * @cpl_op: CPL opcode
  1272. * @cmd: FW cmd/WR.
  1273. *
  1274. * Process received FCoE cmd/WR event from FW.
  1275. */
  1276. void
  1277. csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd)
  1278. {
  1279. struct csio_lnode *ln;
  1280. struct csio_rnode *rn;
  1281. uint8_t portid, opcode = *(uint8_t *)cmd;
  1282. struct fw_fcoe_link_cmd *lcmd;
  1283. struct fw_wr_hdr *wr;
	struct fw_rdev_wr *rdev_wr;
	enum fw_fcoe_link_status lstatus;
	uint32_t fcfi, rdev_flowid, vnpi;
	enum csio_ln_ev evt;

	if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) {

		lcmd = (struct fw_fcoe_link_cmd *)cmd;
		lstatus = lcmd->lstatus;
		portid = FW_FCOE_LINK_CMD_PORTID_GET(
					ntohl(lcmd->op_to_portid));
		fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi));
		vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd));

		if (lstatus == FCOE_LINKUP) {

			/* HW lock here */
			spin_lock_irq(&hw->lock);
			csio_handle_link_up(hw, portid, fcfi, vnpi);
			spin_unlock_irq(&hw->lock);
			/* HW unlock here */

		} else if (lstatus == FCOE_LINKDOWN) {

			/* HW lock here */
			spin_lock_irq(&hw->lock);
			csio_handle_link_down(hw, portid, fcfi, vnpi);
			spin_unlock_irq(&hw->lock);
			/* HW unlock here */

		} else {
			/* lstatus is a u8; no byte-swap needed to print it */
			csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n",
				  lstatus);
			CSIO_INC_STATS(hw, n_cpl_unexp);
		}
	} else if (cpl_op == CPL_FW6_PLD) {
		wr = (struct fw_wr_hdr *) (cmd + 4);
		if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) == FW_RDEV_WR) {
			rdev_wr = (struct fw_rdev_wr *) (cmd + 4);

			rdev_flowid = FW_RDEV_WR_FLOWID_GET(
					ntohl(rdev_wr->alloc_to_len16));
			vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET(
					ntohl(rdev_wr->flags_to_assoc_flowid));

			csio_dbg(hw,
				"FW_RDEV_WR: flowid:x%x ev_cause:x%x "
				"vnpi:0x%x\n", rdev_flowid,
				rdev_wr->event_cause, vnpi);

			if (rdev_wr->protocol != PROT_FCOE) {
				csio_err(hw,
					"FW_RDEV_WR: invalid proto:x%x "
					"received with flowid:x%x\n",
					rdev_wr->protocol,
					rdev_flowid);
				CSIO_INC_STATS(hw, n_evt_drop);
				return;
			}

			/* HW lock here */
			spin_lock_irq(&hw->lock);
			ln = csio_ln_lookup_by_vnpi(hw, vnpi);
			if (!ln) {
				csio_err(hw,
					"FW_RDEV_WR: invalid vnpi:x%x received "
					"with flowid:x%x\n", vnpi, rdev_flowid);
				CSIO_INC_STATS(hw, n_evt_drop);
				goto out_pld;
			}

			rn = csio_confirm_rnode(ln, rdev_flowid,
						&rdev_wr->u.fcoe_rdev);
			if (!rn) {
				csio_ln_dbg(ln,
					"Failed to confirm rnode "
					"for flowid:x%x\n", rdev_flowid);
				CSIO_INC_STATS(hw, n_evt_drop);
				goto out_pld;
			}

			/* Save previous event for debugging */
			ln->prev_evt = ln->cur_evt;
			ln->cur_evt = rdev_wr->event_cause;
			CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]);

			/* Translate all the fabric events to lnode SM events */
			evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause);
			if (evt) {
				csio_ln_dbg(ln,
					"Posting event to lnode event:%d "
					"cause:%d flowid:x%x\n", evt,
					rdev_wr->event_cause, rdev_flowid);
				csio_post_event(&ln->sm, evt);
			}

			/* Hand the event over to the rnode SM here. */
			csio_rnode_fwevt_handler(rn, rdev_wr->event_cause);
out_pld:
			spin_unlock_irq(&hw->lock);
			return;
		} else {
			csio_warn(hw, "unexpected WR op(0x%x) recv\n",
				  FW_WR_OP_GET(be32_to_cpu((wr->hi))));
			CSIO_INC_STATS(hw, n_cpl_unexp);
		}
	} else if (cpl_op == CPL_FW6_MSG) {
		wr = (struct fw_wr_hdr *) (cmd);
		if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) {
			csio_ln_mgmt_wr_handler(hw, wr,
					sizeof(struct fw_fcoe_els_ct_wr));
		} else {
			csio_warn(hw, "unexpected WR op(0x%x) recv\n",
				  FW_WR_OP_GET(be32_to_cpu((wr->hi))));
			CSIO_INC_STATS(hw, n_cpl_unexp);
		}
	} else {
		csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode);
		CSIO_INC_STATS(hw, n_cpl_unexp);
	}
}
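
/*
 * Illustrative sketch (not built): the handler above relies on
 * CSIO_FWE_TO_LNE() to translate a firmware RDEV event cause into an
 * lnode SM event, with a zero result meaning the event is relevant only
 * at the rnode level. A minimal model of such a translation follows;
 * the RDEV_CAUSE_* name is a hypothetical placeholder, while
 * CSIO_LNE_DOWN_LINK is an lnode event used elsewhere in this file.
 */
#if 0
static enum csio_ln_ev
example_fwe_to_lne(uint8_t event_cause)
{
	switch (event_cause) {
	case RDEV_CAUSE_LINK_DOWN:		/* hypothetical cause */
		return CSIO_LNE_DOWN_LINK;
	default:
		return (enum csio_ln_ev)0;	/* no lnode SM event */
	}
}
#endif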
/**
 * csio_lnode_start - Kickstart lnode discovery.
 * @ln: lnode
 *
 * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command.
 */
int
csio_lnode_start(struct csio_lnode *ln)
{
	int rv = 0;

	if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) {
		rv = csio_fcoe_enable_link(ln, 1);
		ln->flags |= CSIO_LNF_LINK_ENABLE;
	}

	return rv;
}
/**
 * csio_lnode_stop - Stop the lnode.
 * @ln: lnode
 *
 * This routine is invoked by the HW module to stop an lnode and its
 * associated NPIV lnodes.
 */
void
csio_lnode_stop(struct csio_lnode *ln)
{
	csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK);
	if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) {
		csio_fcoe_enable_link(ln, 0);
		ln->flags &= ~CSIO_LNF_LINK_ENABLE;
	}
	csio_ln_dbg(ln, "stopping ln :%p\n", ln);
}
/**
 * csio_lnode_close - Close an lnode.
 * @ln: lnode
 *
 * This routine is invoked by the HW module to close an lnode and its
 * associated NPIV lnodes. The lnode and its associated NPIV lnodes are
 * set to the uninitialized state.
 */
void
csio_lnode_close(struct csio_lnode *ln)
{
	csio_post_event_lns(ln, CSIO_LNE_CLOSE);
	if (csio_is_phys_ln(ln))
		ln->vnp_flowid = CSIO_INVALID_IDX;

	csio_ln_dbg(ln, "closed ln :%p\n", ln);
}
/*
 * csio_ln_prep_ecwr - Prepare ELS/CT WR.
 * @io_req - IO request.
 * @wr_len - WR len
 * @immd_len - WR immediate data length
 * @sub_op - Sub opcode
 * @sid - source portid
 * @did - destination portid
 * @flow_id - flowid
 * @fw_wr - ELS/CT WR to be prepared.
 * Returns: 0 - on success
 */
static int
csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len,
		  uint32_t immd_len, uint8_t sub_op, uint32_t sid,
		  uint32_t did, uint32_t flow_id, uint8_t *fw_wr)
{
	struct fw_fcoe_els_ct_wr *wr;
	uint32_t port_id;

	wr = (struct fw_fcoe_els_ct_wr *)fw_wr;
	wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_FCOE_ELS_CT_WR) |
				     FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len));

	wr_len = DIV_ROUND_UP(wr_len, 16);
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(flow_id) |
				       FW_WR_LEN16(wr_len));
	wr->els_ct_type = sub_op;
	wr->ctl_pri = 0;
	wr->cp_en_class = 0;
	wr->cookie = io_req->fw_handle;
	wr->iqid = cpu_to_be16(csio_q_physiqid(
					io_req->lnode->hwp, io_req->iq_idx));
	wr->fl_to_sp = FW_FCOE_ELS_CT_WR_SP(1);
	wr->tmo_val = (uint8_t) io_req->tmo;
	port_id = htonl(sid);
	memcpy(wr->l_id, PORT_ID_PTR(port_id), 3);
	port_id = htonl(did);
	memcpy(wr->r_id, PORT_ID_PTR(port_id), 3);

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len);
	wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr);
	return 0;
}
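
/*
 * Worked example (not built): for a 96-byte WR, DIV_ROUND_UP(96, 16) == 6,
 * so FW_WR_LEN16() encodes a length of six 16-byte units. The 24-bit FC
 * port IDs are copied with memcpy(..., 3) because l_id/r_id are 3-byte
 * fields; PORT_ID_PTR() is assumed here to yield a pointer to the three
 * significant bytes of the big-endian 32-bit value.
 */
#if 0
	uint32_t wr_len16 = DIV_ROUND_UP(96, 16);	/* == 6 */
	uint32_t sid_be = htonl(0x010203);		/* FC S_ID 01.02.03 */

	memcpy(wr->l_id, PORT_ID_PTR(sid_be), 3);	/* copy 3 bytes only */
#endif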
/*
 * csio_ln_mgmt_submit_wr - Post ELS/CT work request.
 * @mgmtm - mgmtm
 * @io_req - io request.
 * @sub_op - ELS or CT request type
 * @pld - DMA payload buffer
 * @pld_len - Payload len
 * Prepares an ELS/CT work request and sends it to FW.
 * Returns: 0 - on success
 */
static int
csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
		       uint8_t sub_op, struct csio_dma_buf *pld,
		       uint32_t pld_len)
{
	struct csio_wr_pair wrp;
	struct csio_lnode *ln = io_req->lnode;
	struct csio_rnode *rn = io_req->rnode;
	struct csio_hw *hw = mgmtm->hw;
	uint8_t fw_wr[64];
	struct ulptx_sgl dsgl;
	uint32_t wr_size = 0;
	uint8_t im_len = 0;
	uint32_t wr_off = 0;
	int ret = 0;

	/* Calculate WR size for this ELS REQ */
	wr_size = sizeof(struct fw_fcoe_els_ct_wr);

	/* Send as immediate data if pld < 256 */
	if (pld_len < 256) {
		wr_size += ALIGN(pld_len, 8);
		im_len = (uint8_t)pld_len;
	} else
		wr_size += sizeof(struct ulptx_sgl);

	/* Roundup WR size in units of 16 bytes */
	wr_size = ALIGN(wr_size, 16);

	/* Get WR to send ELS REQ */
	ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp);
	if (ret != 0) {
		csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n",
			 io_req, ret);
		return ret;
	}

	/* Prepare generic WR used by all ELS/CT commands */
	csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op,
			  ln->nport_id, rn->nport_id,
			  csio_rn_flowid(rn),
			  &fw_wr[0]);

	/* Copy ELS/CT WR CMD */
	csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off,
			    sizeof(struct fw_fcoe_els_ct_wr));
	wr_off += sizeof(struct fw_fcoe_els_ct_wr);

	/* Copy payload to immediate section of WR */
	if (im_len)
		csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len);
	else {
		/* Program DSGL to DMA payload */
		dsgl.cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
				      ULPTX_MORE | ULPTX_NSGE(1));
		dsgl.len0 = cpu_to_be32(pld_len);
		dsgl.addr0 = cpu_to_be64(pld->paddr);
		csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
				    sizeof(struct ulptx_sgl));
	}

	/* Issue work request to xmit ELS/CT req to FW */
	csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false);
	return ret;
}
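
/*
 * Sizing sketch (not built): a 64-byte payload travels as immediate data,
 * while a 512-byte payload is referenced through a ULP_TX DSGL, matching
 * the pld_len < 256 branch above. Sizes assume the fw_fcoe_els_ct_wr and
 * ulptx_sgl definitions from the firmware headers used by this file.
 */
#if 0
	uint32_t sz_imm = ALIGN(sizeof(struct fw_fcoe_els_ct_wr) +
				ALIGN(64, 8), 16);	/* pld_len < 256 */
	uint32_t sz_sgl = ALIGN(sizeof(struct fw_fcoe_els_ct_wr) +
				sizeof(struct ulptx_sgl), 16); /* >= 256 */
#endif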
/*
 * csio_ln_mgmt_submit_req - Submit FCOE Mgmt request.
 * @io_req - IO Request
 * @io_cbfn - Completion handler.
 * @req_type - ELS or CT request type
 * @pld - DMA payload buffer
 * @pld_len - Payload len
 *
 * This API is used to submit a management ELS/CT request.
 * It is called with the hw lock held.
 * Returns: 0 - on success
 *	    -ENOMEM - on error.
 */
static int
csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,
		void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
		enum fcoe_cmn_type req_type, struct csio_dma_buf *pld,
		uint32_t pld_len)
{
	struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode);
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	int rv;

	io_req->io_cbfn = io_cbfn;	/* Upper layer callback handler */
	io_req->fw_handle = (uintptr_t) (io_req);
	io_req->eq_idx = mgmtm->eq_idx;
	io_req->iq_idx = mgmtm->iq_idx;

	rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len);
	if (rv == 0) {
		list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q);
		mgmtm->stats.n_active++;
	}
	return rv;
}
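
/*
 * Usage sketch (not built, example_* names hypothetical): submitting a CT
 * request with a completion callback, taking the hw lock as the comment
 * above requires. FCOE_CT is the CT request type used by this driver.
 */
#if 0
static void
example_ct_done(struct csio_hw *hw, struct csio_ioreq *io_req)
{
	/* Inspect completion status, finish the upper-layer request */
}

static int
example_submit_ct(struct csio_hw *hw, struct csio_ioreq *io_req,
		  struct csio_dma_buf *pld, uint32_t pld_len)
{
	int rv;

	spin_lock_irq(&hw->lock);
	rv = csio_ln_mgmt_submit_req(io_req, example_ct_done,
				     FCOE_CT, pld, pld_len);
	spin_unlock_irq(&hw->lock);
	return rv;
}
#endif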
/*
 * csio_ln_fdmi_init - FDMI Init entry point.
 * @ln: lnode
 */
static int
csio_ln_fdmi_init(struct csio_lnode *ln)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_dma_buf *dma_buf;

	/* Allocate MGMT request required for FDMI */
	ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
	if (!ln->mgmt_req) {
		csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n");
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Allocate DMA buffer for FDMI response payload */
	dma_buf = &ln->mgmt_req->dma_buf;
	dma_buf->len = 2048;
	dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len,
					      &dma_buf->paddr);
	if (!dma_buf->vaddr) {
		csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
		kfree(ln->mgmt_req);
		ln->mgmt_req = NULL;
		return -ENOMEM;
	}

	ln->flags |= CSIO_LNF_FDMI_ENABLE;
	return 0;
}
/*
 * csio_ln_fdmi_exit - FDMI exit entry point.
 * @ln: lnode
 */
static int
csio_ln_fdmi_exit(struct csio_lnode *ln)
{
	struct csio_dma_buf *dma_buf;
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	if (!ln->mgmt_req)
		return 0;

	dma_buf = &ln->mgmt_req->dma_buf;
	if (dma_buf->vaddr)
		pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr,
				    dma_buf->paddr);

	kfree(ln->mgmt_req);
	return 0;
}
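
/*
 * Lifecycle sketch (not built): FDMI setup and teardown are paired.
 * csio_ln_fdmi_init() allocates ln->mgmt_req plus its 2 KB DMA response
 * buffer and sets CSIO_LNF_FDMI_ENABLE; csio_ln_fdmi_exit() releases both.
 * The hypothetical wrapper below mirrors how csio_ln_init() drives them
 * behind the csio_fdmi_enable knob.
 */
#if 0
static int
example_fdmi_setup(struct csio_lnode *ln)
{
	if (!csio_fdmi_enable)
		return 0;		/* FDMI administratively off */

	if (csio_ln_fdmi_init(ln))
		return -ENOMEM;		/* init already logged the error */

	return 0;
}
#endif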
int
csio_scan_done(struct csio_lnode *ln, unsigned long ticks,
	       unsigned long time, unsigned long max_scan_ticks,
	       unsigned long delta_scan_ticks)
{
	int rv = 0;

	if (time >= max_scan_ticks)
		return 1;

	if (!ln->tgt_scan_tick)
		ln->tgt_scan_tick = ticks;

	if ((ticks - ln->tgt_scan_tick) >= delta_scan_ticks) {
		if (!ln->last_scan_ntgts)
			ln->last_scan_ntgts = ln->n_scsi_tgts;
		else {
			if (ln->last_scan_ntgts == ln->n_scsi_tgts)
				return 1;

			ln->last_scan_ntgts = ln->n_scsi_tgts;
		}
		ln->tgt_scan_tick = ticks;
	}
	return rv;
}
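
/*
 * Worked example (not built): with delta_scan_ticks == 5*HZ and
 * max_scan_ticks == 60*HZ, csio_scan_done() returns 1 either when the
 * elapsed scan time crosses 60 s, or when the target count seen at two
 * consecutive 5 s sample points is unchanged, i.e. discovery has
 * quiesced. A hypothetical process-context polling loop:
 */
#if 0
	unsigned long start = jiffies;

	while (!csio_scan_done(ln, jiffies, jiffies - start,
			       60 * HZ, 5 * HZ))
		msleep(100);	/* scan still converging */
#endif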
/*
 * csio_notify_lnodes:
 * @hw: HW module
 * @note: Notification
 *
 * Called from the HW SM to fan out notifications to the
 * lnode SM. Since the HW SM is entered with the lock held,
 * there is no need to hold locks here.
 */
void
csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note)
{
	struct list_head *tmp;
	struct csio_lnode *ln;

	csio_dbg(hw, "Notifying all nodes of event %d\n", note);

	/* Traverse sibling lnodes list and send evt */
	list_for_each(tmp, &hw->sln_head) {
		ln = (struct csio_lnode *) tmp;

		switch (note) {
		case CSIO_LN_NOTIFY_HWREADY:
			csio_lnode_start(ln);
			break;

		case CSIO_LN_NOTIFY_HWRESET:
		case CSIO_LN_NOTIFY_HWREMOVE:
			csio_lnode_close(ln);
			break;

		case CSIO_LN_NOTIFY_HWSTOP:
			csio_lnode_stop(ln);
			break;

		default:
			break;
		}
	}
}
/*
 * csio_disable_lnodes:
 * @hw: HW module
 * @portid: port id
 * @disable: disable/enable flag.
 * If disable=1, disables all lnodes hosted on the given physical port;
 * otherwise enables all the lnodes on the given physical port.
 * This routine needs to be called with the hw lock held.
 */
void
csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable)
{
	struct list_head *tmp;
	struct csio_lnode *ln;

	csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid);

	/* Traverse sibling lnodes list and send evt */
	list_for_each(tmp, &hw->sln_head) {
		ln = (struct csio_lnode *) tmp;
		if (ln->portid != portid)
			continue;

		if (disable)
			csio_lnode_stop(ln);
		else
			csio_lnode_start(ln);
	}
}
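
/*
 * Usage sketch (not built): taking all lnodes on port 0 down and back up
 * around a hypothetical per-port maintenance action, holding the hw lock
 * as documented above.
 */
#if 0
	spin_lock_irq(&hw->lock);
	csio_disable_lnodes(hw, 0, true);	/* stop lnodes on port 0 */
	/* ... port-level work ... */
	csio_disable_lnodes(hw, 0, false);	/* restart them */
	spin_unlock_irq(&hw->lock);
#endif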
/*
 * csio_ln_init - Initialize an lnode.
 * @ln: lnode
 */
static int
csio_ln_init(struct csio_lnode *ln)
{
	int rv = -EINVAL;
	struct csio_lnode *rln, *pln;
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	csio_init_state(&ln->sm, csio_lns_uninit);
	ln->vnp_flowid = CSIO_INVALID_IDX;
	ln->fcf_flowid = CSIO_INVALID_IDX;

	if (csio_is_root_ln(ln)) {

		/* This is the lnode used during initialization */

		ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL);
		if (!ln->fcfinfo) {
			csio_ln_err(ln, "Failed to alloc FCF record\n");
			CSIO_INC_STATS(hw, n_err_nomem);
			goto err;
		}

		INIT_LIST_HEAD(&ln->fcf_lsthead);
		kref_init(&ln->fcfinfo->kref);

		if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
			goto err;

	} else { /* Either a non-root physical or a virtual lnode */

		/*
		 * The rest is common for non-root physical and NPIV lnodes.
		 * Just get references to all other modules.
		 */
		rln = csio_root_lnode(ln);

		if (csio_is_npiv_ln(ln)) {
			/* NPIV */
			pln = csio_parent_lnode(ln);
			kref_get(&pln->fcfinfo->kref);
			ln->fcfinfo = pln->fcfinfo;
		} else {
			/* Another non-root physical lnode (FCF) */
			ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info),
					      GFP_KERNEL);
			if (!ln->fcfinfo) {
				csio_ln_err(ln, "Failed to alloc FCF info\n");
				CSIO_INC_STATS(hw, n_err_nomem);
				goto err;
			}

			kref_init(&ln->fcfinfo->kref);

			if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
				goto err;
		}

	} /* if (!csio_is_root_ln(ln)) */

	return 0;
err:
	return rv;
}
static void
csio_ln_exit(struct csio_lnode *ln)
{
	struct csio_lnode *pln;

	csio_cleanup_rns(ln);
	if (csio_is_npiv_ln(ln)) {
		pln = csio_parent_lnode(ln);
		kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo);
	} else {
		kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo);
		if (csio_fdmi_enable)
			csio_ln_fdmi_exit(ln);
	}
	ln->fcfinfo = NULL;
}
/**
 * csio_lnode_init - Initialize the members of an lnode.
 * @ln: lnode
 * @hw: HW module
 * @pln: parent lnode if @ln is an NPIV lnode, else NULL
 */
int
csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw,
		struct csio_lnode *pln)
{
	int rv = -EINVAL;

	/* Link this lnode to hw */
	csio_lnode_to_hw(ln) = hw;

	/* Link child to parent if child lnode */
	if (pln)
		ln->pln = pln;
	else
		ln->pln = NULL;

	/* Initialize scsi_tgt and timers to zero */
	ln->n_scsi_tgts = 0;
	ln->last_scan_ntgts = 0;
	ln->tgt_scan_tick = 0;

	/* Initialize rnode list */
	INIT_LIST_HEAD(&ln->rnhead);
	INIT_LIST_HEAD(&ln->cln_head);

	/* Initialize log level for debug */
	ln->params.log_level = hw->params.log_level;

	if (csio_ln_init(ln))
		goto err;

	/* Add lnode to list of sibling or children lnodes */
	spin_lock_irq(&hw->lock);
	list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head);
	if (pln)
		pln->num_vports++;
	spin_unlock_irq(&hw->lock);

	hw->num_lns++;
	return 0;
err:
	csio_lnode_to_hw(ln) = NULL;
	return rv;
}
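
/*
 * Lifecycle sketch (not built, example_* name hypothetical): an NPIV
 * vport is created by pairing csio_lnode_init() with a later
 * csio_lnode_exit(), passing the physical lnode as the parent so that
 * pln->num_vports stays accurate.
 */
#if 0
static struct csio_lnode *
example_create_vport(struct csio_hw *hw, struct csio_lnode *phys_ln)
{
	struct csio_lnode *ln = kzalloc(sizeof(*ln), GFP_KERNEL);

	if (!ln)
		return NULL;

	if (csio_lnode_init(ln, hw, phys_ln)) {	/* links ln under phys_ln */
		kfree(ln);
		return NULL;
	}
	return ln;
}
#endif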
/**
 * csio_lnode_exit - De-instantiate an lnode.
 * @ln: lnode
 */
void
csio_lnode_exit(struct csio_lnode *ln)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	csio_ln_exit(ln);

	/* Remove this lnode from hw->sln_head */
	spin_lock_irq(&hw->lock);

	list_del_init(&ln->sm.sm_list);

	/*
	 * If it is a child lnode, decrement the
	 * counter in its parent lnode.
	 */
	if (ln->pln)
		ln->pln->num_vports--;

	/* Update root lnode pointer */
	if (list_empty(&hw->sln_head))
		hw->rln = NULL;
	else
		hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head);

	spin_unlock_irq(&hw->lock);

	csio_lnode_to_hw(ln) = NULL;
	hw->num_lns--;
}